From fef67f31b5c26ad94dd718b4db8f6b136a4ff32d Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 18 Jul 2019 21:00:20 +0900 Subject: [PATCH 01/76] Update Makefile --- Makefile | 2 +- README.md | 1 + example/enwiki_index_mapping.json | 103 ++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 example/enwiki_index_mapping.json diff --git a/Makefile b/Makefile index 484515f..ecda13d 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname ifeq ($(VERSION),) VERSION = latest endif -LDFLAGS = -ldflags "-X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" +LDFLAGS = -ldflags "-s -w -X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" ifeq ($(GOOS),windows) BIN_EXT = .exe diff --git a/README.md b/README.md index 11b9d81..a66acb2 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ Blast requires some C/C++ libraries if you need to enable cld2, icu, libstemmer ### Ubuntu 18.10 ```bash +$ sudo apt-get update $ sudo apt-get install -y \ libicu-dev \ libstemmer-dev \ diff --git a/example/enwiki_index_mapping.json b/example/enwiki_index_mapping.json new file mode 100644 index 0000000..2ef6200 --- /dev/null +++ b/example/enwiki_index_mapping.json @@ -0,0 +1,103 @@ +{ + "types": { + "enwiki": { + "enabled": true, + "dynamic": true, + "properties": { + "title_en": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "text_en": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "url": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + 
"store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + }, + "timestamp": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "datetime", + "store": true, + "index": true, + "include_in_all": true + } + ], + "default_analyzer": "" + }, + "_type": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + } + }, + "default_analyzer": "en" + } + }, + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } +} From 47b9e91fe0ac68b3bd4e986ef9cd75bac9e6fbf1 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 24 Jul 2019 12:22:51 +0900 Subject: [PATCH 02/76] New CLI (#82) --- Dockerfile | 6 +- README.md | 421 ++++--- .../{delete_value.go => cluster_delete.go} | 4 +- cmd/blast/{get_value.go => cluster_get.go} | 4 +- cmd/blast/cluster_node_health.go | 71 ++ .../{get_node.go => cluster_node_info.go} | 14 +- cmd/blast/cluster_node_leave.go | 56 + .../{snapshot.go => cluster_node_snapshot.go} | 4 +- .../cluster_node_start.go} | 70 +- .../{get_cluster.go => cluster_peers_info.go} | 4 +- ...atch_cluster.go => cluster_peers_watch.go} | 6 +- cmd/blast/{set_value.go => cluster_set.go} | 4 +- .../{watch_store.go => cluster_watch.go} | 4 +- ...{set_document.go => distributor_delete.go} | 72 +- cmd/blast/distributor_get.go | 59 + cmd/blast/distributor_index.go | 141 +++ cmd/blast/distributor_node_health.go | 71 ++ .../distributor_node_start.go} | 
32 +- cmd/blast/distributor_search.go | 93 ++ .../{delete_document.go => indexer_delete.go} | 56 +- cmd/blast/{get_document.go => indexer_get.go} | 4 +- cmd/blast/indexer_index.go | 141 +++ cmd/blast/indexer_node_health.go | 71 ++ cmd/blast/indexer_node_info.go | 65 + cmd/blast/indexer_node_leave.go | 56 + ...elete_node.go => indexer_node_snapshot.go} | 8 +- .../indexer_node_start.go} | 79 +- cmd/blast/indexer_peers_info.go | 53 + cmd/blast/indexer_peers_watch.go | 82 ++ cmd/blast/{search.go => indexer_search.go} | 47 +- cmd/blast/livenessprobe.go | 47 - cmd/blast/main.go | 1099 ++++++++++++++--- cmd/blast/readinessprobe.go | 47 - cmd/blast/set_node.go | 56 - cmd/blastd/main.go | 284 ----- dispatcher/grpc_service.go | 15 +- dispatcher/http_handler.go | 159 ++- dispatcher/server_test.go | 2 +- example/geo_doc1.json | 18 - example/geo_doc2.json | 20 - example/geo_doc3.json | 20 - example/geo_doc4.json | 20 - example/geo_doc5.json | 20 - example/geo_doc6.json | 20 - example/geo_doc_1.json | 21 + example/geo_doc_2.json | 23 + example/geo_doc_3.json | 23 + example/geo_doc_4.json | 23 + example/geo_doc_5.json | 23 + example/geo_doc_6.json | 23 + example/wiki_bulk_delete.json | 6 - example/wiki_bulk_delete.txt | 4 + example/wiki_bulk_index.json | 38 - example/wiki_bulk_index.txt | 36 + example/wiki_doc_arwiki_1.json | 11 +- example/wiki_doc_bgwiki_1.json | 11 +- example/wiki_doc_cawiki_1.json | 11 +- example/wiki_doc_cswiki_1.json | 11 +- example/wiki_doc_dawiki_1.json | 11 +- example/wiki_doc_dewiki_1.json | 11 +- example/wiki_doc_elwiki_1.json | 11 +- example/wiki_doc_enwiki_1.json | 11 +- example/wiki_doc_eswiki_1.json | 11 +- example/wiki_doc_fawiki_1.json | 11 +- example/wiki_doc_fiwiki_1.json | 11 +- example/wiki_doc_frwiki_1.json | 11 +- example/wiki_doc_gawiki_1.json | 11 +- example/wiki_doc_glwiki_1.json | 11 +- example/wiki_doc_guwiki_1.json | 11 +- example/wiki_doc_hiwiki_1.json | 11 +- example/wiki_doc_huwiki_1.json | 11 +- example/wiki_doc_hywiki_1.json | 11 
+- example/wiki_doc_idwiki_1.json | 11 +- example/wiki_doc_itwiki_1.json | 11 +- example/wiki_doc_jawiki_1.json | 11 +- example/wiki_doc_knwiki_1.json | 11 +- example/wiki_doc_kowiki_1.json | 11 +- example/wiki_doc_mlwiki_1.json | 11 +- example/wiki_doc_nlwiki_1.json | 11 +- example/wiki_doc_nowiki_1.json | 11 +- example/wiki_doc_pswiki_1.json | 11 +- example/wiki_doc_ptwiki_1.json | 11 +- example/wiki_doc_rowiki_1.json | 11 +- example/wiki_doc_ruwiki_1.json | 11 +- example/wiki_doc_svwiki_1.json | 11 +- example/wiki_doc_tawiki_1.json | 11 +- example/wiki_doc_tewiki_1.json | 11 +- example/wiki_doc_thwiki_1.json | 11 +- example/wiki_doc_trwiki_1.json | 11 +- example/wiki_doc_zhwiki_1.json | 11 +- example/wiki_search_request_simple.json | 2 +- grpc/client.go | 8 +- indexer/grpc_service.go | 11 +- indexer/http_handler.go | 164 ++- indexer/raft_server.go | 40 +- indexer/server_test.go | 302 +++-- indexutils/document.go | 65 + manager/server_test.go | 38 +- 98 files changed, 3385 insertions(+), 1486 deletions(-) rename cmd/blast/{delete_value.go => cluster_delete.go} (92%) rename cmd/blast/{get_value.go => cluster_get.go} (93%) create mode 100644 cmd/blast/cluster_node_health.go rename cmd/blast/{get_node.go => cluster_node_info.go} (79%) create mode 100644 cmd/blast/cluster_node_leave.go rename cmd/blast/{snapshot.go => cluster_node_snapshot.go} (91%) rename cmd/{blastd/manager.go => blast/cluster_node_start.go} (63%) rename cmd/blast/{get_cluster.go => cluster_peers_info.go} (93%) rename cmd/blast/{watch_cluster.go => cluster_peers_watch.go} (93%) rename cmd/blast/{set_value.go => cluster_set.go} (94%) rename cmd/blast/{watch_store.go => cluster_watch.go} (96%) rename cmd/blast/{set_document.go => distributor_delete.go} (55%) create mode 100644 cmd/blast/distributor_get.go create mode 100644 cmd/blast/distributor_index.go create mode 100644 cmd/blast/distributor_node_health.go rename cmd/{blastd/dispatcher.go => blast/distributor_node_start.go} (79%) create mode 
100644 cmd/blast/distributor_search.go rename cmd/blast/{delete_document.go => indexer_delete.go} (60%) rename cmd/blast/{get_document.go => indexer_get.go} (93%) create mode 100644 cmd/blast/indexer_index.go create mode 100644 cmd/blast/indexer_node_health.go create mode 100644 cmd/blast/indexer_node_info.go create mode 100644 cmd/blast/indexer_node_leave.go rename cmd/blast/{delete_node.go => indexer_node_snapshot.go} (86%) rename cmd/{blastd/indexer.go => blast/indexer_node_start.go} (63%) create mode 100644 cmd/blast/indexer_peers_info.go create mode 100644 cmd/blast/indexer_peers_watch.go rename cmd/blast/{search.go => indexer_search.go} (63%) delete mode 100644 cmd/blast/livenessprobe.go delete mode 100644 cmd/blast/readinessprobe.go delete mode 100644 cmd/blast/set_node.go delete mode 100644 cmd/blastd/main.go delete mode 100644 example/geo_doc1.json delete mode 100644 example/geo_doc2.json delete mode 100644 example/geo_doc3.json delete mode 100644 example/geo_doc4.json delete mode 100644 example/geo_doc5.json delete mode 100644 example/geo_doc6.json create mode 100644 example/geo_doc_1.json create mode 100644 example/geo_doc_2.json create mode 100644 example/geo_doc_3.json create mode 100644 example/geo_doc_4.json create mode 100644 example/geo_doc_5.json create mode 100644 example/geo_doc_6.json delete mode 100644 example/wiki_bulk_delete.json create mode 100644 example/wiki_bulk_delete.txt delete mode 100644 example/wiki_bulk_index.json create mode 100644 example/wiki_bulk_index.txt create mode 100644 indexutils/document.go diff --git a/Dockerfile b/Dockerfile index 06bdb1b..4b91268 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM golang:1.12.1-stretch +FROM golang:1.12.7-stretch ARG VERSION @@ -67,7 +67,7 @@ COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 5000 5001 5002 +EXPOSE 2000 5000 8000 ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] -CMD [ "blastd", "--help" ] +CMD [ "blast", "--help" ] diff --git a/README.md b/README.md index a66acb2..ba90827 100644 --- a/README.md +++ b/README.md @@ -245,13 +245,13 @@ $ make \ Running a Blast in standalone mode is easy. Start a indexer like so: ```bash -$ ./bin/blastd \ - indexer \ +$ ./bin/blast indexer node start \ + --grpc-address=:5000 \ + --http-address=:8000 \ --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ + --node-address=:2000 \ --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb \ --index-mapping-file=./example/wiki_index_mapping.json \ --index-type=upside_down \ --index-storage-type=boltdb @@ -263,6 +263,28 @@ Please refer to following document for details of index mapping: - http://blevesearch.com/docs/Index-Mapping/ - https://github.com/blevesearch/bleve/blob/master/mapping/index.go#L43 +You can check the node with the following command: + +```bash +$ ./bin/blast indexer node info --grpc-address=:5000 +``` + +You can see the result in JSON format. The result of the above command is: + +```json +{ + "node_config": { + "bind_addr": ":2000", + "data_dir": "/tmp/blast/indexer1", + "grpc_addr": ":5000", + "http_addr": ":8000", + "node_id": "indexer1", + "raft_storage_type": "boltdb" + }, + "state": "Leader" +} +``` + You can now put, get, search and delete the documents via CLI. @@ -271,7 +293,20 @@ You can now put, get, search and delete the documents via CLI. 
For document indexing, execute the following command: ```bash -$ cat ./example/wiki_doc_enwiki_1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast indexer index --grpc-address=:5000 enwiki_1 ' +{ + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" +} +' +``` + +or + +```bash +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json ``` You can see the result in JSON format. The result of the above command is: @@ -286,7 +321,7 @@ You can see the result in JSON format. The result of the above command is: Getting a document is as following: ```bash -$ ./bin/blast get document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 ``` You can see the result in JSON format. The result of the above command is: @@ -306,7 +341,7 @@ You can see the result in JSON format. The result of the above command is: Searching documents is as like following: ```bash -$ cat ./example/wiki_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:5001 +$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json ``` You can see the result in JSON format. The result of the above command is: @@ -335,10 +370,6 @@ You can see the result in JSON format. 
The result of the above command is: "*" ], "facets": { - "Contributor count": { - "size": 10, - "field": "contributor" - }, "Timestamp range": { "size": 10, "field": "timestamp", @@ -354,11 +385,17 @@ You can see the result in JSON format. The result of the above command is: "start": "2011-01-01T00:00:00Z" } ] + }, + "Type count": { + "size": 10, + "field": "_type" } }, "explain": false, "sort": [ - "-_score" + "-_score", + "_id", + "-timestamp" ], "includeLocations": false }, @@ -420,7 +457,9 @@ You can see the result in JSON format. The result of the above command is: } }, "sort": [ - "_score" + "_score", + "enwiki_1", + " \u0001\u0015\u001f\u0004~80Pp\u0000" ], "fields": { "_type": "enwiki", @@ -432,14 +471,8 @@ You can see the result in JSON format. The result of the above command is: ], "total_hits": 1, "max_score": 0.09703538256409851, - "took": 201951, + "took": 688819, "facets": { - "Contributor count": { - "field": "contributor", - "total": 0, - "missing": 1, - "other": 0 - }, "Timestamp range": { "field": "timestamp", "total": 1, @@ -453,6 +486,18 @@ You can see the result in JSON format. The result of the above command is: "count": 1 } ] + }, + "Type count": { + "field": "_type", + "total": 1, + "missing": 0, + "other": 0, + "terms": [ + { + "term": "enwiki", + "count": 1 + } + ] } } } @@ -471,7 +516,7 @@ Please refer to following document for details of search request and result: Deleting a document is as following: ```bash -$ ./bin/blast delete document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast indexer delete --grpc-address=:5000 enwiki_1 ``` You can see the result in JSON format. The result of the above command is: @@ -486,13 +531,13 @@ You can see the result in JSON format. 
The result of the above command is: Indexing documents in bulk, run the following command: ```bash -$ cat ./example/wiki_bulk_index.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 +$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.txt --bulk ``` You can see the result in JSON format. The result of the above command is: ```bash -4 +36 ``` @@ -501,7 +546,7 @@ You can see the result in JSON format. The result of the above command is: Deleting documents in bulk, run the following command: ```bash -$ cat ./example/wiki_bulk_delete.json | xargs -0 ./bin/blast delete document --grpc-addr=:5001 +$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt ``` You can see the result in JSON format. The result of the above command is: @@ -521,16 +566,28 @@ Also you can do above commands via HTTP REST API that listened port 5002. Indexing a document via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:5002/documents/enwiki_1' -d @./example/wiki_doc_enwiki_1.json +$ curl -X PUT 'http://127.0.0.1:8000/documents/enwiki_1' --data-binary ' +{ + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" +} +' ``` +or + +```bash +$ curl -X PUT 'http://127.0.0.1:8000/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json +``` ### Getting a document via HTTP REST API Getting a document via HTTP is as following: ```bash -$ curl -X GET 'http://127.0.0.1:5002/documents/enwiki_1' +$ curl -X GET 'http://127.0.0.1:8000/documents/enwiki_1' ``` @@ -539,7 +596,7 @@ $ curl -X GET 'http://127.0.0.1:5002/documents/enwiki_1' Searching documents via HTTP is as following: ```bash -$ curl -X POST 'http://127.0.0.1:5002/search' -d @./example/wiki_search_request.json +$ curl -X POST 'http://127.0.0.1:8000/search' --data-binary @./example/wiki_search_request.json ``` @@ -548,7 +605,7 @@ $ curl -X POST 'http://127.0.0.1:5002/search' -d @./example/wiki_search_request. Deleting a document via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:5002/documents/enwiki_1' +$ curl -X DELETE 'http://127.0.0.1:8000/documents/enwiki_1' ``` @@ -557,7 +614,7 @@ $ curl -X DELETE 'http://127.0.0.1:5002/documents/enwiki_1' Indexing documents in bulk via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:5002/documents' -d @./example/wiki_bulk_index.json +$ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' --data-binary @./example/wiki_bulk_index.txt ``` @@ -566,7 +623,7 @@ $ curl -X PUT 'http://127.0.0.1:5002/documents' -d @./example/wiki_bulk_index.js Deleting documents in bulk via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:5002/documents' -d @./example/wiki_bulk_delete.json +$ curl -X DELETE 'http://127.0.0.1:8000/documents' --data-binary @./example/wiki_bulk_delete.txt ``` @@ -579,13 +636,13 @@ Blast can easily bring up a cluster. Running a Blast in standalone is not fault First of all, start a indexer in standalone. 
```bash -$ ./bin/blastd \ - indexer \ +$ ./bin/blast indexer node start \ + --grpc-address=:5000 \ + --http-address=:8000 \ --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ + --node-address=:2000 \ --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb \ --index-mapping-file=./example/wiki_index_mapping.json \ --index-type=upside_down \ --index-storage-type=boltdb @@ -594,23 +651,23 @@ $ ./bin/blastd \ Then, start two more indexers. ```bash -$ ./bin/blastd \ - indexer \ - --peer-addr=:5001 \ +$ ./bin/blast indexer node start \ + --peer-grpc-address=:5000 \ + --grpc-address=:5010 \ + --http-address=:8010 \ --node-id=indexer2 \ - --bind-addr=:5010 \ - --grpc-addr=:5011 \ - --http-addr=:5012 \ - --data-dir=/tmp/blast/indexer2 - -$ ./bin/blastd \ - indexer \ - --peer-addr=:5001 \ + --node-address=:2010 \ + --data-dir=/tmp/blast/indexer2 \ + --raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --peer-grpc-address=:5000 \ + --grpc-address=:5020 \ + --http-address=:8020 \ --node-id=indexer3 \ - --bind-addr=:5020 \ - --grpc-addr=:5021 \ - --http-addr=:5022 \ - --data-dir=/tmp/blast/indexer3 + --node-address=:2020 \ + --data-dir=/tmp/blast/indexer3 \ + --raft-storage-type=boltdb ``` _Above example shows each Blast node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ @@ -620,7 +677,7 @@ So you have a 3-node cluster. That way you can tolerate the failure of 1 node. Y ```bash -$ ./bin/blast get cluster --grpc-addr=:5001 +$ ./bin/blast indexer peers info --grpc-address=:5000 ``` You can see the result in JSON format. The result of the above command is: @@ -629,10 +686,10 @@ You can see the result in JSON format. 
The result of the above command is: { "indexer1": { "node_config": { - "bind_addr": ":5000", + "bind_addr": ":2000", "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5001", - "http_addr": ":5002", + "grpc_addr": ":5000", + "http_addr": ":8000", "node_id": "indexer1", "raft_storage_type": "boltdb" }, @@ -640,10 +697,10 @@ You can see the result in JSON format. The result of the above command is: }, "indexer2": { "node_config": { - "bind_addr": ":5010", + "bind_addr": ":2010", "data_dir": "/tmp/blast/indexer2", - "grpc_addr": ":5011", - "http_addr": ":5012", + "grpc_addr": ":5010", + "http_addr": ":8010", "node_id": "indexer2", "raft_storage_type": "boltdb" }, @@ -651,10 +708,10 @@ You can see the result in JSON format. The result of the above command is: }, "indexer3": { "node_config": { - "bind_addr": ":5020", + "bind_addr": ":2020", "data_dir": "/tmp/blast/indexer3", - "grpc_addr": ":5021", - "http_addr": ":5022", + "grpc_addr": ":5020", + "http_addr": ":8020", "node_id": "indexer3", "raft_storage_type": "boltdb" }, @@ -668,13 +725,13 @@ Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, da The following command indexes documents to any node in the cluster: ```bash -$ cat ./example/wiki_doc_enwiki_1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json ``` So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast get document --grpc-addr=:5001 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 ``` You can see the result in JSON format. The result of the above command is: @@ -692,8 +749,8 @@ You can see the result in JSON format. 
The result of the above command is: You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast get document --grpc-addr=:5011 enwiki_1 -$ ./bin/blast get document --grpc-addr=:5021 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 ``` You can see the result in JSON format. The result of the above command is: @@ -726,36 +783,34 @@ Blast provides the following type of node for federation: Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. ```bash -$ ./bin/blastd \ - manager \ - --node-id=manager1 \ - --bind-addr=:15000 \ - --grpc-addr=:15001 \ - --http-addr=:15002 \ - --data-dir=/tmp/blast/manager1 \ - --raft-storage-type=badger \ +$ ./bin/blast cluster node start \ + --grpc-address=:5100 \ + --http-address=:8100 \ + --node-id=cluster1 \ + --node-address=:2100 \ + --data-dir=/tmp/blast/cluster1 \ + --raft-storage-type=boltdb \ --index-mapping-file=./example/wiki_index_mapping.json \ --index-type=upside_down \ --index-storage-type=boltdb -$ ./bin/blastd \ - manager \ - --peer-addr=:15001 \ - --node-id=manager2 \ - --bind-addr=:15010 \ - --grpc-addr=:15011 \ - --http-addr=:15012 \ - --data-dir=/tmp/blast/manager2 \ - --raft-storage-type=badger - -$ ./bin/blastd \ - manager \ - --peer-addr=:15001 \ - --node-id=manager3 \ - --bind-addr=:15020 \ - --grpc-addr=:15021 \ - --http-addr=:15022 \ - --data-dir=/tmp/blast/manager3 +$ ./bin/blast cluster node start \ + --peer-grpc-address=:5100 \ + --grpc-address=:5110 \ + --http-address=:8110 \ + --node-id=cluster2 \ + --node-address=:2110 \ + --data-dir=/tmp/blast/cluster2 \ + --raft-storage-type=boltdb + +$ ./bin/blast cluster node start \ + --peer-grpc-address=:5100 \ + --grpc-address=:5120 \ + --http-address=:8120 \ + --node-id=cluster3 \ + --node-address=:2120 \ + --data-dir=/tmp/blast/cluster3 \ + --raft-storage-type=boltdb ``` ### Bring up the 
indexer cluster. @@ -764,65 +819,65 @@ Federated mode differs from cluster mode that it specifies the manager in start The following example starts two 3-node clusters. ```bash -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard1 \ + --grpc-address=:5000 \ + --http-address=:8000 \ --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ - --data-dir=/tmp/blast/indexer1 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ + --node-address=:2000 \ + --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard1 \ + --grpc-address=:5010 \ + --http-address=:8010 \ --node-id=indexer2 \ - --bind-addr=:5010 \ - --grpc-addr=:5011 \ - --http-addr=:5012 \ - --data-dir=/tmp/blast/indexer2 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster1 \ + --node-address=:2010 \ + --data-dir=/tmp/blast/indexer2 \ + --raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard1 \ + --grpc-address=:5020 \ + --http-address=:8020 \ --node-id=indexer3 \ - --bind-addr=:5020 \ - --grpc-addr=:5021 \ - --http-addr=:5022 \ - --data-dir=/tmp/blast/indexer3 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ + --node-address=:2020 \ + --data-dir=/tmp/blast/indexer3 \ + --raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard2 \ + --grpc-address=:5030 \ + --http-address=:8030 \ --node-id=indexer4 \ - --bind-addr=:5030 \ - --grpc-addr=:5031 \ - --http-addr=:5032 \ - --data-dir=/tmp/blast/indexer4 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ + --node-address=:2030 \ + --data-dir=/tmp/blast/indexer4 \ + 
--raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard2 \ + --grpc-address=:5040 \ + --http-address=:8040 \ --node-id=indexer5 \ - --bind-addr=:5040 \ - --grpc-addr=:5041 \ - --http-addr=:5042 \ - --data-dir=/tmp/blast/indexer5 - -$ ./bin/blastd \ - indexer \ - --manager-addr=:15001 \ - --cluster-id=cluster2 \ + --node-address=:2040 \ + --data-dir=/tmp/blast/indexer5 \ + --raft-storage-type=boltdb + +$ ./bin/blast indexer node start \ + --cluster-grpc-address=:5100 \ + --shard-id=shard2 \ + --grpc-address=:5050 \ + --http-address=:8050 \ --node-id=indexer6 \ - --bind-addr=:5050 \ - --grpc-addr=:5051 \ - --http-addr=:5052 \ - --data-dir=/tmp/blast/indexer6 + --node-address=:2050 \ + --data-dir=/tmp/blast/indexer6 \ + --raft-storage-type=boltdb ``` ### Start up the dispatcher. @@ -830,23 +885,22 @@ $ ./bin/blastd \ Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. ```bash -$ ./bin/blastd \ - dispatcher \ - --manager-addr=:15001 \ - --grpc-addr=:25001 \ - --http-addr=:25002 +$ ./bin/blast distributor node start \ + --cluster-grpc-address=:5100 \ + --grpc-address=:5200 \ + --http-address=:8200 ``` ```bash -$ cat ./example/wiki_bulk_index.json | xargs -0 ./bin/blast set document --grpc-addr=:25001 +$ ./bin/blast distributor index --grpc-address=:5200 --file=./example/wiki_bulk_index.txt --bulk ``` ```bash -$ cat ./example/wiki_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:25001 +$ ./bin/blast distributor search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json ``` ```bash -$ cat ./example/wiki_bulk_delete.json | xargs -0 ./bin/blast delete document --grpc-addr=:25001 +$ ./bin/blast distributor delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt ``` @@ -887,24 +941,26 @@ Running a Blast data node on Docker. 
Start Blast data node like so: ```bash $ docker run --rm --name blast-indexer1 \ + -p 2000:2000 \ -p 5000:5000 \ - -p 5001:5001 \ - -p 5002:5002 \ + -p 8000:8000 \ -v $(pwd)/example:/opt/blast/example \ - mosuka/blast:latest blastd indexer \ + mosuka/blast:latest blast indexer node start \ + --grpc-address=:5000 \ + --http-address=:8000 \ --node-id=blast-indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ + --node-address=:2000 \ --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb \ --index-mapping-file=/opt/blast/example/wiki_index_mapping.json \ - --index-storage-type=leveldb + --index-type=upside_down \ + --index-storage-type=boltdb ``` You can execute the command in docker container as follows: ```bash -$ docker exec -it blast-indexer1 blast-indexer node --grpc-addr=:7070 +$ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 ``` @@ -936,15 +992,30 @@ $ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles ``` +### Starting Indexer + +```bash +$ ./bin/blast indexer node start \ + --grpc-address=:5000 \ + --http-address=:8000 \ + --node-id=indexer1 \ + --node-address=:2000 \ + --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb \ + --index-mapping-file=./example/enwiki_index_mapping.json \ + --index-type=upside_down \ + --index-storage-type=boltdb +``` + ### Indexing wikipedia dump -```shell +```bash $ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) do echo "Indexing ${FILE}" TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") - DOCS=$(cat ${FILE} | jq -r '. + {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -s) - curl -s -X PUT -H 'Content-Type: application/json' "http://127.0.0.1:5002/documents" -d "${DOCS}" + DOCS=$(cat ${FILE} | jq -r '. 
+ {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -c) + curl -s -X PUT -H 'Content-Type: application/json' "http://127.0.0.1:8000/documents?bulk=true" --data-binary "${DOCS}" done ``` @@ -956,13 +1027,13 @@ This section explain how to index Spatial/Geospatial data to Blast. ### Starting Indexer with Spatial/Geospatial index mapping ```bash -$ ./bin/blastd \ - indexer \ +$ ./bin/blast indexer node start \ + --grpc-address=:5000 \ + --http-address=:8000 \ --node-id=indexer1 \ - --bind-addr=:5000 \ - --grpc-addr=:5001 \ - --http-addr=:5002 \ + --node-address=:2000 \ --data-dir=/tmp/blast/indexer1 \ + --raft-storage-type=boltdb \ --index-mapping-file=./example/geo_index_mapping.json \ --index-type=upside_down \ --index-storage-type=boltdb @@ -971,16 +1042,16 @@ $ ./bin/blastd \ ### Indexing example Spatial/Geospatial data ```bash -$ cat ./example/geo_doc1.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc1 -$ cat ./example/geo_doc2.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc2 -$ cat ./example/geo_doc3.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc3 -$ cat ./example/geo_doc4.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc4 -$ cat ./example/geo_doc5.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc5 -$ cat ./example/geo_doc6.json | xargs -0 ./bin/blast set document --grpc-addr=:5001 geo_doc6 +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_1.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_2.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_3.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_4.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_5.json +$ ./bin/blast indexer index --grpc-address=:5000 --file 
./example/geo_doc_6.json ``` ### Searching example Spatial/Geospatial data ```bash -$ cat ./example/geo_search_request.json | xargs -0 ./bin/blast search --grpc-addr=:5001 +$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/geo_search_request.json ``` diff --git a/cmd/blast/delete_value.go b/cmd/blast/cluster_delete.go similarity index 92% rename from cmd/blast/delete_value.go rename to cmd/blast/cluster_delete.go index 93413e4..2cdff16 100644 --- a/cmd/blast/delete_value.go +++ b/cmd/blast/cluster_delete.go @@ -23,8 +23,8 @@ import ( "github.com/urfave/cli" ) -func execDeleteValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterDelete(c *cli.Context) error { + grpcAddr := c.String("grpc-address") key := c.Args().Get(0) if key == "" { diff --git a/cmd/blast/get_value.go b/cmd/blast/cluster_get.go similarity index 93% rename from cmd/blast/get_value.go rename to cmd/blast/cluster_get.go index d4ec2ed..ee60520 100644 --- a/cmd/blast/get_value.go +++ b/cmd/blast/cluster_get.go @@ -23,8 +23,8 @@ import ( "github.com/urfave/cli" ) -func execGetValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterGet(c *cli.Context) error { + grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/cmd/blast/cluster_node_health.go b/cmd/blast/cluster_node_health.go new file mode 100644 index 0000000..79bc6f8 --- /dev/null +++ b/cmd/blast/cluster_node_health.go @@ -0,0 +1,71 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func clusterNodeHealth(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + liveness := c.Bool("liveness") + readiness := c.Bool("readiness") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + if !liveness && !readiness { + LivenessState, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) + + readinessState, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) + } else { + if liveness { + state, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + if readiness { + state, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + } + + return nil +} diff --git a/cmd/blast/get_node.go b/cmd/blast/cluster_node_info.go similarity index 79% rename from cmd/blast/get_node.go rename to cmd/blast/cluster_node_info.go index c7c8271..ba93b13 100644 --- a/cmd/blast/get_node.go +++ b/cmd/blast/cluster_node_info.go @@ -23,8 +23,18 @@ import ( "github.com/urfave/cli" ) -func execGetNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterNodeInfo(c *cli.Context) error { + clusterGrpcAddr := c.String("cluster-grpc-address") + shardId := c.String("shard-id") + peerGrpcAddr := c.String("peer-grpc-address") + + if clusterGrpcAddr != "" && shardId != "" { + + } else if peerGrpcAddr != "" { + + } + + grpcAddr := c.String("grpc-address") nodeId := c.Args().Get(0) diff --git 
a/cmd/blast/cluster_node_leave.go b/cmd/blast/cluster_node_leave.go new file mode 100644 index 0000000..bfd151e --- /dev/null +++ b/cmd/blast/cluster_node_leave.go @@ -0,0 +1,56 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func clusterNodeLeave(c *cli.Context) error { + clusterGrpcAddr := c.String("cluster-grpc-address") + shardId := c.String("shard-id") + peerGrpcAddr := c.String("peer-grpc-address") + + if clusterGrpcAddr != "" && shardId != "" { + // get grpc address of leader node + } else if peerGrpcAddr != "" { + // get grpc address of leader node + } + + grpcAddr := c.String("grpc-address") + nodeId := c.String("node-id") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + err = client.DeleteNode(nodeId) + if err != nil { + return err + } + + return nil +} diff --git a/cmd/blast/snapshot.go b/cmd/blast/cluster_node_snapshot.go similarity index 91% rename from cmd/blast/snapshot.go rename to cmd/blast/cluster_node_snapshot.go index 7e3ec46..09428cb 100644 --- a/cmd/blast/snapshot.go +++ b/cmd/blast/cluster_node_snapshot.go @@ -22,8 +22,8 @@ import ( "github.com/urfave/cli" ) -func execSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func 
clusterNodeSnapshot(c *cli.Context) error { + grpcAddr := c.String("grpc-address") client, err := grpc.NewClient(grpcAddr) if err != nil { diff --git a/cmd/blastd/manager.go b/cmd/blast/cluster_node_start.go similarity index 63% rename from cmd/blastd/manager.go rename to cmd/blast/cluster_node_start.go index da15dd4..feb56c2 100644 --- a/cmd/blastd/manager.go +++ b/cmd/blast/cluster_node_start.go @@ -27,39 +27,41 @@ import ( "github.com/urfave/cli" ) -func startManager(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") +func clusterNodeStart(c *cli.Context) error { + peerGrpcAddr := c.String("peer-grpc-address") + + grpcAddr := c.String("grpc-address") + httpAddr := c.String("http-address") nodeId := c.String("node-id") - bindAddr := c.String("bind-addr") - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") + nodeAddr := c.String("node-address") dataDir := c.String("data-dir") raftStorageType := c.String("raft-storage-type") - peerAddr := c.String("peer-addr") indexMappingFile := c.String("index-mapping-file") indexType := c.String("index-type") 
indexStorageType := c.String("index-storage-type") + logLevel := c.String("log-level") + logFilename := c.String("log-file") + logMaxSize := c.Int("log-max-size") + logMaxBackups := c.Int("log-max-backups") + logMaxAge := c.Int("log-max-age") + logCompress := c.Bool("log-compress") + + grpcLogLevel := c.String("grpc-log-level") + grpcLogFilename := c.String("grpc-log-file") + grpcLogMaxSize := c.Int("grpc-log-max-size") + grpcLogMaxBackups := c.Int("grpc-log-max-backups") + grpcLogMaxAge := c.Int("grpc-log-max-age") + grpcLogCompress := c.Bool("grpc-log-compress") + + httpLogFilename := c.String("http-log-file") + httpLogMaxSize := c.Int("http-log-max-size") + httpLogMaxBackups := c.Int("http-log-max-backups") + httpLogMaxAge := c.Int("http-log-max-age") + httpLogCompress := c.Bool("http-log-compress") + // create logger logger := logutils.NewLogger( logLevel, @@ -81,24 +83,24 @@ func startManager(c *cli.Context) error { ) // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, + httpLogger := logutils.NewApacheCombinedLogger( + httpLogFilename, + httpLogMaxSize, + httpLogMaxBackups, + httpLogMaxAge, + httpLogCompress, ) // create cluster config clusterConfig := config.DefaultClusterConfig() - if peerAddr != "" { - clusterConfig.PeerAddr = peerAddr + if peerGrpcAddr != "" { + clusterConfig.PeerAddr = peerGrpcAddr } // create node config nodeConfig := &config.NodeConfig{ NodeId: nodeId, - BindAddr: bindAddr, + BindAddr: nodeAddr, GRPCAddr: grpcAddr, HTTPAddr: httpAddr, DataDir: dataDir, @@ -125,7 +127,7 @@ func startManager(c *cli.Context) error { IndexStorageType: indexStorageType, } - svr, err := manager.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) + svr, err := manager.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), 
grpcLogger.Named(nodeId), httpLogger) if err != nil { return err } diff --git a/cmd/blast/get_cluster.go b/cmd/blast/cluster_peers_info.go similarity index 93% rename from cmd/blast/get_cluster.go rename to cmd/blast/cluster_peers_info.go index 1c123af..517a08d 100644 --- a/cmd/blast/get_cluster.go +++ b/cmd/blast/cluster_peers_info.go @@ -23,8 +23,8 @@ import ( "github.com/urfave/cli" ) -func execGetCluster(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterPeersInfo(c *cli.Context) error { + grpcAddr := c.String("grpc-address") client, err := grpc.NewClient(grpcAddr) if err != nil { diff --git a/cmd/blast/watch_cluster.go b/cmd/blast/cluster_peers_watch.go similarity index 93% rename from cmd/blast/watch_cluster.go rename to cmd/blast/cluster_peers_watch.go index cb2e267..ebf9c8e 100644 --- a/cmd/blast/watch_cluster.go +++ b/cmd/blast/cluster_peers_watch.go @@ -27,8 +27,8 @@ import ( "github.com/urfave/cli" ) -func execWatchCluster(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterPeersWatch(c *cli.Context) error { + grpcAddr := c.String("grpc-address") client, err := grpc.NewClient(grpcAddr) if err != nil { @@ -41,7 +41,7 @@ func execWatchCluster(c *cli.Context) error { } }() - err = execGetCluster(c) + err = indexerPeersInfo(c) if err != nil { return err } diff --git a/cmd/blast/set_value.go b/cmd/blast/cluster_set.go similarity index 94% rename from cmd/blast/set_value.go rename to cmd/blast/cluster_set.go index 04c3032..4ac4328 100644 --- a/cmd/blast/set_value.go +++ b/cmd/blast/cluster_set.go @@ -24,8 +24,8 @@ import ( "github.com/urfave/cli" ) -func execSetValue(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterSet(c *cli.Context) error { + grpcAddr := c.String("grpc-address") key := c.Args().Get(0) if key == "" { diff --git a/cmd/blast/watch_store.go b/cmd/blast/cluster_watch.go similarity index 96% rename from cmd/blast/watch_store.go rename to cmd/blast/cluster_watch.go index 
e10b1c5..e1d2546 100644 --- a/cmd/blast/watch_store.go +++ b/cmd/blast/cluster_watch.go @@ -27,8 +27,8 @@ import ( "github.com/urfave/cli" ) -func execWatchStore(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func clusterWatch(c *cli.Context) error { + grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/cmd/blast/set_document.go b/cmd/blast/distributor_delete.go similarity index 55% rename from cmd/blast/set_document.go rename to cmd/blast/distributor_delete.go index fded562..76e7a82 100644 --- a/cmd/blast/set_document.go +++ b/cmd/blast/distributor_delete.go @@ -15,53 +15,68 @@ package main import ( + "bufio" "encoding/json" - "errors" "fmt" + "io" "os" "github.com/mosuka/blast/grpc" "github.com/urfave/cli" ) -func execSetDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func distributorDelete(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + filePath := c.String("file") + id := c.Args().Get(0) - // create documents - docs := make([]map[string]interface{}, 0) + ids := make([]string, 0) - if c.NArg() == 1 { - // documents - docsStr := c.Args().Get(0) + if id != "" { + ids = append(ids, id) + } - err := json.Unmarshal([]byte(docsStr), &docs) + if filePath != "" { + _, err := os.Stat(filePath) if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error return err } - } else if c.NArg() == 2 { - // document - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - - // string -> map[string]interface{} - var fields map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fields) + + // read index mapping file + file, err := os.Open(filePath) if err != nil { return err } + defer func() { + _ = file.Close() + }() - // create document - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } + reader := bufio.NewReader(file) + for { + docId, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if docId 
!= "" { + ids = append(ids, docId) + } + break + } - docs = append(docs, doc) - } else { - return errors.New("argument error") + return err + } + + if docId != "" { + ids = append(ids, docId) + } + } } - // create gRPC client + // create client client, err := grpc.NewClient(grpcAddr) if err != nil { return err @@ -73,13 +88,12 @@ func execSetDocument(c *cli.Context) error { } }() - // index documents in bulk - count, err := client.IndexDocument(docs) + result, err := client.DeleteDocument(ids) if err != nil { return err } - resultBytes, err := json.MarshalIndent(count, "", " ") + resultBytes, err := json.MarshalIndent(result, "", " ") if err != nil { return err } diff --git a/cmd/blast/distributor_get.go b/cmd/blast/distributor_get.go new file mode 100644 index 0000000..5dbe684 --- /dev/null +++ b/cmd/blast/distributor_get.go @@ -0,0 +1,59 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func distributorGet(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + id := c.Args().Get(0) + if id == "" { + err := errors.New("arguments are not correct") + return err + } + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + fields, err := client.GetDocument(id) + if err != nil { + return err + } + + fieldsBytes, err := json.MarshalIndent(fields, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(fieldsBytes))) + + return nil +} diff --git a/cmd/blast/distributor_index.go b/cmd/blast/distributor_index.go new file mode 100644 index 0000000..795cb2f --- /dev/null +++ b/cmd/blast/distributor_index.go @@ -0,0 +1,141 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexutils" + "github.com/urfave/cli" +) + +func distributorIndex(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + filePath := c.String("file") + bulk := c.Bool("bulk") + id := c.Args().Get(0) + fieldsSrc := c.Args().Get(1) + + docs := make([]*indexutils.Document, 0) + + if id != "" && fieldsSrc != "" { + // create fields + var fields map[string]interface{} + err := json.Unmarshal([]byte(fieldsSrc), &fields) + if err != nil { + return err + } + + // create document + doc, err := indexutils.NewDocument(id, fields) + if err != nil { + return err + } + + docs = append(docs, doc) + } + + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } + + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + + if bulk { + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + } else { + docBytes, err := ioutil.ReadAll(file) + if err != nil { + return err + } + + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + + // create gRPC client + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + // index documents in 
bulk + count, err := client.IndexDocument(docs) + if err != nil { + return err + } + + resultBytes, err := json.MarshalIndent(count, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + + return nil +} diff --git a/cmd/blast/distributor_node_health.go b/cmd/blast/distributor_node_health.go new file mode 100644 index 0000000..a25c357 --- /dev/null +++ b/cmd/blast/distributor_node_health.go @@ -0,0 +1,71 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func distributorNodeHealth(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + liveness := c.Bool("liveness") + readiness := c.Bool("readiness") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + if !liveness && !readiness { + LivenessState, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) + + readinessState, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) + } else { + if liveness { + state, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + if readiness { + state, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + } + + return nil +} diff --git a/cmd/blastd/dispatcher.go b/cmd/blast/distributor_node_start.go similarity index 79% rename from cmd/blastd/dispatcher.go rename to cmd/blast/distributor_node_start.go index 95ad75b..bba46ee 100644 --- a/cmd/blastd/dispatcher.go +++ b/cmd/blast/distributor_node_start.go @@ -25,7 +25,12 @@ import ( "github.com/urfave/cli" ) -func startDispatcher(c *cli.Context) error { +func distributorNodeStart(c *cli.Context) error { + managerAddr := c.String("cluster-grpc-address") + + grpcAddr := c.String("grpc-address") + httpAddr := c.String("http-address") + logLevel := c.GlobalString("log-level") logFilename := c.GlobalString("log-file") logMaxSize := c.GlobalInt("log-max-size") @@ -40,16 +45,11 @@ func startDispatcher(c *cli.Context) error { grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") grpcLogCompress := c.GlobalBool("grpc-log-compress") - 
httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") - - managerAddr := c.String("manager-addr") - - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") + httpLogFilename := c.GlobalString("http-log-file") + httpLogMaxSize := c.GlobalInt("http-log-max-size") + httpLogMaxBackups := c.GlobalInt("http-log-max-backups") + httpLogMaxAge := c.GlobalInt("http-log-max-age") + httpLogCompress := c.GlobalBool("http-log-compress") // create logger logger := logutils.NewLogger( @@ -73,11 +73,11 @@ func startDispatcher(c *cli.Context) error { // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, + httpLogFilename, + httpLogMaxSize, + httpLogMaxBackups, + httpLogMaxAge, + httpLogCompress, ) // create cluster config diff --git a/cmd/blast/distributor_search.go b/cmd/blast/distributor_search.go new file mode 100644 index 0000000..c50163a --- /dev/null +++ b/cmd/blast/distributor_search.go @@ -0,0 +1,93 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/blevesearch/bleve" + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func distributorSearch(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + searchRequestPath := c.String("file") + + searchRequest := bleve.NewSearchRequest(nil) + + if searchRequestPath != "" { + _, err := os.Stat(searchRequestPath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } + + // open file + searchRequestFile, err := os.Open(searchRequestPath) + if err != nil { + return err + } + defer func() { + _ = searchRequestFile.Close() + }() + + // read file + searchRequestBytes, err := ioutil.ReadAll(searchRequestFile) + if err != nil { + return err + } + + // create search request + if searchRequestBytes != nil { + err := json.Unmarshal(searchRequestBytes, searchRequest) + if err != nil { + return err + } + } + } + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + searchResult, err := client.Search(searchRequest) + if err != nil { + return err + } + + jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) + + return nil +} diff --git a/cmd/blast/delete_document.go b/cmd/blast/indexer_delete.go similarity index 60% rename from cmd/blast/delete_document.go rename to cmd/blast/indexer_delete.go index 558e959..680d7e5 100644 --- a/cmd/blast/delete_document.go +++ b/cmd/blast/indexer_delete.go @@ -15,31 +15,65 @@ package main import ( + "bufio" "encoding/json" "fmt" + "io" "os" "github.com/mosuka/blast/grpc" "github.com/urfave/cli" ) -func execDeleteDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func indexerDelete(c *cli.Context) error { + grpcAddr 
:= c.String("grpc-address") + filePath := c.String("file") + id := c.Args().Get(0) - // create documents ids := make([]string, 0) - // documents - idsStr := c.Args().Get(0) + if id != "" { + ids = append(ids, id) + } - err := json.Unmarshal([]byte(idsStr), &ids) - if err != nil { - switch err.(type) { - case *json.SyntaxError: - ids = append(ids, idsStr) - default: + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error return err } + + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + + reader := bufio.NewReader(file) + for { + docId, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if docId != "" { + ids = append(ids, docId) + } + break + } + + return err + } + + if docId != "" { + ids = append(ids, docId) + } + } } // create client diff --git a/cmd/blast/get_document.go b/cmd/blast/indexer_get.go similarity index 93% rename from cmd/blast/get_document.go rename to cmd/blast/indexer_get.go index 98b31de..148b062 100644 --- a/cmd/blast/get_document.go +++ b/cmd/blast/indexer_get.go @@ -24,8 +24,8 @@ import ( "github.com/urfave/cli" ) -func execGetDocument(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func indexerGet(c *cli.Context) error { + grpcAddr := c.String("grpc-address") id := c.Args().Get(0) if id == "" { err := errors.New("arguments are not correct") diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go new file mode 100644 index 0000000..0b73e71 --- /dev/null +++ b/cmd/blast/indexer_index.go @@ -0,0 +1,141 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexutils" + "github.com/urfave/cli" +) + +func indexerIndex(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + filePath := c.String("file") + bulk := c.Bool("bulk") + id := c.Args().Get(0) + fieldsSrc := c.Args().Get(1) + + docs := make([]*indexutils.Document, 0) + + if id != "" && fieldsSrc != "" { + // create fields + var fields map[string]interface{} + err := json.Unmarshal([]byte(fieldsSrc), &fields) + if err != nil { + return err + } + + // create document + doc, err := indexutils.NewDocument(id, fields) + if err != nil { + return err + } + + docs = append(docs, doc) + } + + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } + + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + + if bulk { + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + } else { + docBytes, err := 
ioutil.ReadAll(file) + if err != nil { + return err + } + + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + + // create gRPC client + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + // index documents in bulk + count, err := client.IndexDocument(docs) + if err != nil { + return err + } + + resultBytes, err := json.MarshalIndent(count, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + + return nil +} diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go new file mode 100644 index 0000000..478e3ce --- /dev/null +++ b/cmd/blast/indexer_node_health.go @@ -0,0 +1,71 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func indexerNodeHealth(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + liveness := c.Bool("liveness") + readiness := c.Bool("readiness") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + if !liveness && !readiness { + LivenessState, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) + + readinessState, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) + } else { + if liveness { + state, err := client.LivenessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + if readiness { + state, err := client.ReadinessProbe() + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } + } + + return nil +} diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go new file mode 100644 index 0000000..72c964e --- /dev/null +++ b/cmd/blast/indexer_node_info.go @@ -0,0 +1,65 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func indexerNodeInfo(c *cli.Context) error { + clusterGrpcAddr := c.String("cluster-grpc-address") + shardId := c.String("shard-id") + peerGrpcAddr := c.String("peer-grpc-address") + + if clusterGrpcAddr != "" && shardId != "" { + + } else if peerGrpcAddr != "" { + + } + + grpcAddr := c.String("grpc-address") + + nodeId := c.Args().Get(0) + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + metadata, err := client.GetNode(nodeId) + if err != nil { + return err + } + + metadataBytes, err := json.MarshalIndent(metadata, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) + + return nil +} diff --git a/cmd/blast/indexer_node_leave.go b/cmd/blast/indexer_node_leave.go new file mode 100644 index 0000000..7f150ea --- /dev/null +++ b/cmd/blast/indexer_node_leave.go @@ -0,0 +1,56 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func indexerNodeLeave(c *cli.Context) error { + clusterGrpcAddr := c.String("cluster-grpc-address") + shardId := c.String("shard-id") + peerGrpcAddr := c.String("peer-grpc-address") + + if clusterGrpcAddr != "" && shardId != "" { + // get grpc address of leader node + } else if peerGrpcAddr != "" { + // get grpc address of leader node + } + + grpcAddr := c.String("grpc-address") + nodeId := c.String("node-id") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + err = client.DeleteNode(nodeId) + if err != nil { + return err + } + + return nil +} diff --git a/cmd/blast/delete_node.go b/cmd/blast/indexer_node_snapshot.go similarity index 86% rename from cmd/blast/delete_node.go rename to cmd/blast/indexer_node_snapshot.go index d2d7566..68f3938 100644 --- a/cmd/blast/delete_node.go +++ b/cmd/blast/indexer_node_snapshot.go @@ -22,10 +22,8 @@ import ( "github.com/urfave/cli" ) -func execDeleteNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - nodeId := c.Args().Get(0) +func indexerNodeSnapshot(c *cli.Context) error { + grpcAddr := c.String("grpc-address") client, err := grpc.NewClient(grpcAddr) if err != nil { @@ -38,7 +36,7 @@ func execDeleteNode(c *cli.Context) error { } }() - err = client.DeleteNode(nodeId) + err = client.Snapshot() if err != nil { return err } diff --git a/cmd/blastd/indexer.go b/cmd/blast/indexer_node_start.go similarity index 63% rename from cmd/blastd/indexer.go rename to cmd/blast/indexer_node_start.go index 04bf1af..387b57e 100644 --- a/cmd/blastd/indexer.go +++ b/cmd/blast/indexer_node_start.go @@ -27,42 +27,43 @@ import ( "github.com/urfave/cli" ) -func startIndexer(c *cli.Context) error { - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - 
logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpAccessLogFilename := c.GlobalString("http-access-log-file") - httpAccessLogMaxSize := c.GlobalInt("http-access-log-max-size") - httpAccessLogMaxBackups := c.GlobalInt("http-access-log-max-backups") - httpAccessLogMaxAge := c.GlobalInt("http-access-log-max-age") - httpAccessLogCompress := c.GlobalBool("http-access-log-compress") - - managerAddr := c.String("manager-addr") - clusterId := c.String("cluster-id") +func indexerNodeStart(c *cli.Context) error { + clusterGRPCAddr := c.String("cluster-grpc-address") + shardId := c.String("shard-id") + peerGRPCAddr := c.String("peer-grpc-address") + + grpcAddr := c.String("grpc-address") + httpAddr := c.String("http-address") nodeId := c.String("node-id") - bindAddr := c.String("bind-addr") - grpcAddr := c.String("grpc-addr") - httpAddr := c.String("http-addr") + nodeAddr := c.String("node-address") dataDir := c.String("data-dir") raftStorageType := c.String("raft-storage-type") - peerAddr := c.String("peer-addr") indexMappingFile := c.String("index-mapping-file") indexType := c.String("index-type") indexStorageType := c.String("index-storage-type") + logLevel := c.String("log-level") + logFilename := c.String("log-file") + logMaxSize := c.Int("log-max-size") + logMaxBackups := c.Int("log-max-backups") + logMaxAge := c.Int("log-max-age") + logCompress := c.Bool("log-compress") + + grpcLogLevel := c.String("grpc-log-level") + grpcLogFilename := c.String("grpc-log-file") + grpcLogMaxSize := c.Int("grpc-log-max-size") + 
grpcLogMaxBackups := c.Int("grpc-log-max-backups") + grpcLogMaxAge := c.Int("grpc-log-max-age") + grpcLogCompress := c.Bool("grpc-log-compress") + + httpLogFile := c.String("http-log-file") + httpLogMaxSize := c.Int("http-log-max-size") + httpLogMaxBackups := c.Int("http-log-max-backups") + httpLogMaxAge := c.Int("http-log-max-age") + httpLogCompress := c.Bool("http-log-compress") + // create logger logger := logutils.NewLogger( logLevel, @@ -85,29 +86,29 @@ func startIndexer(c *cli.Context) error { // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger( - httpAccessLogFilename, - httpAccessLogMaxSize, - httpAccessLogMaxBackups, - httpAccessLogMaxAge, - httpAccessLogCompress, + httpLogFile, + httpLogMaxSize, + httpLogMaxBackups, + httpLogMaxAge, + httpLogCompress, ) // create cluster config clusterConfig := config.DefaultClusterConfig() - if managerAddr != "" { - clusterConfig.ManagerAddr = managerAddr + if clusterGRPCAddr != "" { + clusterConfig.ManagerAddr = clusterGRPCAddr } - if clusterId != "" { - clusterConfig.ClusterId = clusterId + if shardId != "" { + clusterConfig.ClusterId = shardId } - if peerAddr != "" { - clusterConfig.PeerAddr = peerAddr + if peerGRPCAddr != "" { + clusterConfig.PeerAddr = peerGRPCAddr } // create node config nodeConfig := &config.NodeConfig{ NodeId: nodeId, - BindAddr: bindAddr, + BindAddr: nodeAddr, GRPCAddr: grpcAddr, HTTPAddr: httpAddr, DataDir: dataDir, diff --git a/cmd/blast/indexer_peers_info.go b/cmd/blast/indexer_peers_info.go new file mode 100644 index 0000000..a438981 --- /dev/null +++ b/cmd/blast/indexer_peers_info.go @@ -0,0 +1,53 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/urfave/cli" +) + +func indexerPeersInfo(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + cluster, err := client.GetCluster() + if err != nil { + return err + } + + clusterBytes, err := json.MarshalIndent(cluster, "", " ") + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + + return nil +} diff --git a/cmd/blast/indexer_peers_watch.go b/cmd/blast/indexer_peers_watch.go new file mode 100644 index 0000000..1fcd637 --- /dev/null +++ b/cmd/blast/indexer_peers_watch.go @@ -0,0 +1,82 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + + "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/protobuf" + "github.com/urfave/cli" +) + +func indexerPeersWatch(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + + client, err := grpc.NewClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + err = indexerPeersInfo(c) + if err != nil { + return err + } + + watchClient, err := client.WatchCluster() + if err != nil { + return err + } + + for { + resp, err := watchClient.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Println(err.Error()) + break + } + + cluster, err := protobuf.MarshalAny(resp.Cluster) + if err != nil { + return err + } + if cluster == nil { + return errors.New("nil") + } + + var clusterBytes []byte + clusterMap := *cluster.(*map[string]interface{}) + clusterBytes, err = json.MarshalIndent(clusterMap, "", " ") + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + } + + return nil +} diff --git a/cmd/blast/search.go b/cmd/blast/indexer_search.go similarity index 63% rename from cmd/blast/search.go rename to cmd/blast/indexer_search.go index 174aa83..c73d821 100644 --- a/cmd/blast/search.go +++ b/cmd/blast/indexer_search.go @@ -16,8 +16,8 @@ package main import ( "encoding/json" - "errors" "fmt" + "io/ioutil" "os" "github.com/blevesearch/bleve" @@ -25,22 +25,45 @@ import ( "github.com/urfave/cli" ) -func execSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") +func indexerSearch(c *cli.Context) error { + grpcAddr := c.String("grpc-address") + searchRequestPath := c.String("file") - searchRequestStr := c.Args().Get(0) - if searchRequestStr == "" { - err := errors.New("key argument must be set") - return err - } - - // string -> bleve.SearchRequest searchRequest := bleve.NewSearchRequest(nil) - if 
searchRequestStr != "" { - err := json.Unmarshal([]byte(searchRequestStr), searchRequest) + + if searchRequestPath != "" { + _, err := os.Stat(searchRequestPath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } + + // open file + searchRequestFile, err := os.Open(searchRequestPath) if err != nil { return err } + defer func() { + _ = searchRequestFile.Close() + }() + + // read file + searchRequestBytes, err := ioutil.ReadAll(searchRequestFile) + if err != nil { + return err + } + + // create search request + if searchRequestBytes != nil { + err := json.Unmarshal(searchRequestBytes, searchRequest) + if err != nil { + return err + } + } } client, err := grpc.NewClient(grpcAddr) diff --git a/cmd/blast/livenessprobe.go b/cmd/blast/livenessprobe.go deleted file mode 100644 index b801fd8..0000000 --- a/cmd/blast/livenessprobe.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execLivenessProbe(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - state, err := client.LivenessProbe() - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) - - return nil -} diff --git a/cmd/blast/main.go b/cmd/blast/main.go index 32e9e87..c5725e1 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -19,6 +19,7 @@ import ( "os" "path" + "github.com/blevesearch/bleve" "github.com/mosuka/blast/version" "github.com/urfave/cli" ) @@ -26,7 +27,7 @@ import ( func main() { app := cli.NewApp() app.Name = path.Base(os.Args[0]) - app.Usage = "blast" + app.Usage = "Command for blast" app.Version = version.Version app.Authors = []cli.Author{ { @@ -34,233 +35,1047 @@ func main() { Email: "minoru.osuka@gmail.com", }, } + app.Commands = []cli.Command{ { - Name: "livenessprobe", - Usage: "liveness probe", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execLivenessProbe, - }, - { - Name: "readinessprobe", - Usage: "readiness probe", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execReadinessProbe, - }, - { - Name: "get", - Usage: "get", + Name: "cluster", + Usage: "Command for blast cluster", Subcommands: []cli.Command{ { Name: "node", - Usage: "get node", + Usage: "Command for blast cluster node", + Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast cluster node", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + EnvVar: "BLAST_CLUSTER_PEER_GRPC_ADDRESS", + Usage: "The gRPC address of the peer node that 
exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5100", + EnvVar: "BLAST_CLUSTER_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8100", + EnvVar: "BLAST_CLUSTER_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + EnvVar: "BLAST_CLUSTER_NODE_ID", + Usage: "Unique ID to identify the node", + }, + cli.StringFlag{ + Name: "node-address", + Value: ":2100", + EnvVar: "BLAST_CLUSTER_NODE_ADDRESS", + Usage: "The address that should be bound to for internal cluster communications", + }, + cli.StringFlag{ + Name: "data-dir", + Value: "/tmp/blast/indexer", + EnvVar: "BLAST_CLUSTER_DATA_DIR", + Usage: "A data directory for the node to store state", + }, + cli.StringFlag{ + Name: "raft-storage-type", + Value: "boltdb", + EnvVar: "BLAST_CLUSTER_RAFT_STORAGE_TYPE", + Usage: "Storage type of the database that stores the state", + }, + cli.StringFlag{ + Name: "index-mapping-file", + Value: "", + EnvVar: "BLAST_CLUSTER_INDEX_MAPPING_FILE", + Usage: "An index mapping file to use", + }, + cli.StringFlag{ + Name: "index-type", + Value: bleve.Config.DefaultIndexType, + EnvVar: "BLAST_CLUSTER_INDEX_TYPE", + Usage: "An index type to use", + }, + cli.StringFlag{ + Name: "index-storage-type", + Value: bleve.Config.DefaultKVStore, + EnvVar: "BLAST_CLUSTER_INDEX_STORAGE_TYPE", + Usage: "An index storage type to use", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_CLUSTER_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_CLUSTER_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_CLUSTER_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: "BLAST_CLUSTER_LOG_MAX_BACKUPS", + Usage: "Max backup count of log 
files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_CLUSTER_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_CLUSTER_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: "BLAST_CLUSTER_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_CLUSTER_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_CLUSTER_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_CLUSTER_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: "BLAST_CLUSTER_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_CLUSTER_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_CLUSTER_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_CLUSTER_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access log", + }, + }, + Action: clusterNodeStart, + }, + { + Name: "info", + Usage: "Get node information", + Flags: 
[]cli.Flag{ + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + Usage: "The node ID for which to retrieve the node information", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: clusterNodeInfo, + }, + { + Name: "leave", + Usage: "Leave the node from the cluster", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: clusterNodeLeave, + }, + { + Name: "health", + Usage: "Health check", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.BoolFlag{ + Name: "liveness", + Usage: "Liveness probe", + }, + cli.BoolFlag{ + Name: "readiness", + Usage: "Readiness probe", + }, + }, + Action: clusterNodeHealth, + }, + { + Name: "snapshot", + Usage: "Snapshot", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: clusterNodeSnapshot, + }, + }, + }, + { + Name: "peers", + Usage: "Command for blast cluster peers", + Subcommands: []cli.Command{ + { + Name: "info", + Usage: "Get peers", + Flags: []cli.Flag{ + //cli.StringFlag{ + // Name: "cluster-grpc-address", + // Value: "", + // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "shard-id", + // Value: "", + // Usage: "Shard ID registered in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "peer-grpc-address", + // Value: "", 
+ // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "node-id", + // Value: "", + // Usage: "The node ID for which to retrieve the node information", + //}, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: clusterPeersInfo, + }, + { + Name: "watch", + Usage: "Watch peers", + Flags: []cli.Flag{ + //cli.StringFlag{ + // Name: "cluster-grpc-address", + // Value: "", + // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "shard-id", + // Value: "", + // Usage: "Shard ID registered in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "peer-grpc-address", + // Value: "", + // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "node-id", + // Value: "", + // Usage: "The node ID for which to retrieve the node information", + //}, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: clusterPeersWatch, + }, + }, + }, + { + Name: "get", + Usage: "Get data", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", }, }, - ArgsUsage: "[id]", - Action: execGetNode, + ArgsUsage: "[key]", + Action: clusterGet, }, { - Name: "cluster", - Usage: "get cluster", + Name: "set", + Usage: "Set data", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: 
"file", + Value: "", + Usage: "Value file", }, }, - Action: execGetCluster, + ArgsUsage: "[key] [value]", + Action: clusterSet, }, { - Name: "value", - Usage: "get value", + Name: "delete", + Usage: "Delete data", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", }, }, ArgsUsage: "[key]", - Action: execGetValue, + Action: clusterDelete, }, { - Name: "document", - Usage: "get document", + Name: "watch", + Usage: "Watch data", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", }, }, - ArgsUsage: "[id]", - Action: execGetDocument, + ArgsUsage: "[key]", + Action: clusterWatch, }, }, }, { - Name: "set", - Usage: "set", + Name: "indexer", + Usage: "Command for blast indexer", Subcommands: []cli.Command{ { Name: "node", - Usage: "set node", + Usage: "Command for blast indexer node", + Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast indexer node", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "cluster-grpc-address", + Value: "", + EnvVar: "BLAST_INDEXER_CLUSTER_GRPC_ADDRESS", + Usage: "The gRPC address of the existing cluster node to be joined", + }, + cli.StringFlag{ + Name: "shard-id", + Value: "", + EnvVar: "BLAST_INDEXER_SHARD_ID", + Usage: "Shard ID registered in the existing cluster to be joined", + }, + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + EnvVar: "BLAST_INDEXER_PEER_GRPC_ADDRESS", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5000", + EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8000", + EnvVar: "BLAST_INDEXER_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + 
Name: "node-id", + Value: "", + EnvVar: "BLAST_INDEXER_NODE_ID", + Usage: "Unique ID to identify the node", + }, + cli.StringFlag{ + Name: "node-address", + Value: ":2000", + EnvVar: "BLAST_INDEXER_NODE_ADDRESS", + Usage: "The address that should be bound to for internal cluster communications", + }, + cli.StringFlag{ + Name: "data-dir", + Value: "/tmp/blast/indexer", + EnvVar: "BLAST_INDEXER_DATA_DIR", + Usage: "A data directory for the node to store state", + }, + cli.StringFlag{ + Name: "raft-storage-type", + Value: "boltdb", + EnvVar: "BLAST_INDEXER_RAFT_STORAGE_TYPE", + Usage: "Storage type of the database that stores the state", + }, + cli.StringFlag{ + Name: "index-mapping-file", + Value: "", + EnvVar: "BLAST_INDEXER_INDEX_MAPPING_FILE", + Usage: "An index mapping file to use", + }, + cli.StringFlag{ + Name: "index-type", + Value: bleve.Config.DefaultIndexType, + EnvVar: "BLAST_INDEXER_INDEX_TYPE", + Usage: "An index type to use", + }, + cli.StringFlag{ + Name: "index-storage-type", + Value: bleve.Config.DefaultKVStore, + EnvVar: "BLAST_INDEXER_INDEX_STORAGE_TYPE", + Usage: "An index storage type to use", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_INDEXER_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_INDEXER_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: 
"BLAST_INDEXER_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_INDEXER_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_INDEXER_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access log", + }, + }, + Action: indexerNodeStart, + }, + { + Name: "info", + Usage: "Get node information", + Flags: []cli.Flag{ + //cli.StringFlag{ + // Name: "cluster-grpc-address", + // Value: "", + // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "shard-id", + // Value: "", + // Usage: "Shard ID registered in which the target node for retrieving 
the information is joining", + //}, + //cli.StringFlag{ + // Name: "peer-grpc-address", + // Value: "", + // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "node-id", + // Value: "", + // Usage: "The node ID for which to retrieve the node information", + //}, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: indexerNodeInfo, + }, + { + Name: "leave", + Usage: "Leave the node from the cluster", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "cluster-grpc-address", + Value: "", + Usage: "The gRPC address of the existing cluster node to be joined", + }, + cli.StringFlag{ + Name: "shard-id", + Value: "", + Usage: "Shard ID registered in the existing cluster to be joined", + }, + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + Usage: "Node ID to delete", + }, + }, + Action: indexerNodeLeave, + }, + { + Name: "health", + Usage: "Health check", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.BoolFlag{ + Name: "liveness", + Usage: "Liveness probe", + }, + cli.BoolFlag{ + Name: "readiness", + Usage: "Readiness probe", + }, + }, + Action: indexerNodeHealth, + }, + { + Name: "snapshot", + Usage: "Snapshot", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: indexerNodeSnapshot, + }, + }, + }, + { + Name: "peers", + Usage: "Command for blast indexer peers", + Subcommands: []cli.Command{ + { + Name: "info", + Usage: "Get peers", + Flags: []cli.Flag{ + //cli.StringFlag{ + // 
Name: "cluster-grpc-address", + // Value: "", + // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "shard-id", + // Value: "", + // Usage: "Shard ID registered in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "peer-grpc-address", + // Value: "", + // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "node-id", + // Value: "", + // Usage: "The node ID for which to retrieve the node information", + //}, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: indexerPeersInfo, + }, + { + Name: "watch", + Usage: "Watch peers", + Flags: []cli.Flag{ + //cli.StringFlag{ + // Name: "cluster-grpc-address", + // Value: "", + // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "shard-id", + // Value: "", + // Usage: "Shard ID registered in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "peer-grpc-address", + // Value: "", + // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", + //}, + //cli.StringFlag{ + // Name: "node-id", + // Value: "", + // Usage: "The node ID for which to retrieve the node information", + //}, + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC address of the node for which to retrieve the node information", + }, + }, + Action: indexerPeersWatch, + }, + }, + }, + { + Name: "get", + Usage: "Get document(s)", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", 
+ Usage: "Document ID list", + }, + }, + ArgsUsage: "[document IDs]", + Action: indexerGet, + }, + { + Name: "index", + Usage: "Index document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Document list", + }, + cli.BoolFlag{ + Name: "bulk", + Usage: "Bulk indexing", }, }, - ArgsUsage: "[id] [metadata]", - Action: execSetNode, + ArgsUsage: "[document ID] [document fields]", + Action: indexerIndex, }, { - Name: "value", - Usage: "set value", + Name: "delete", + Usage: "Delete document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Document ID list", }, }, - ArgsUsage: "[key] [value]", - Action: execSetValue, + ArgsUsage: "[document IDs]", + Action: indexerDelete, }, { - Name: "document", - Usage: "set document", + Name: "search", + Usage: "Search document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Search request", }, }, - ArgsUsage: "[documents | [id] [fields]]", - Action: execSetDocument, + ArgsUsage: "[search request]", + Action: indexerSearch, }, }, }, { - Name: "delete", - Usage: "delete", + Name: "distributor", + Usage: "Command for blast distributor", Subcommands: []cli.Command{ { Name: "node", - Usage: "delete node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Usage: "Command for blast distributor node", + Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast distributor node", 
+ Flags: []cli.Flag{ + cli.StringFlag{ + Name: "cluster-grpc-address", + Value: ":5100", + EnvVar: "BLAST_DISTRIBUTOR_CLUSTER_GRPC_ADDRESS", + Usage: "The gRPC address of the existing cluster node to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5200", + EnvVar: "BLAST_DISTRIBUTOR_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8200", + EnvVar: "BLAST_DISTRIBUTOR_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_DISTRIBUTOR_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISTRIBUTOR_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_DISTRIBUTOR_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: 
"BLAST_DISTRIBUTOR_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access log", + }, + }, + Action: distributorNodeStart, + }, + { + Name: "health", + Usage: "Health check", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.BoolFlag{ + Name: "liveness", + Usage: "Liveness probe", + }, + cli.BoolFlag{ + Name: "readiness", + Usage: "Readiness probe", + }, + }, + Action: distributorNodeHealth, }, }, - ArgsUsage: "[id]", - Action: execDeleteNode, }, { - Name: "value", - Usage: "delete value", + Name: "get", + Usage: "Get document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Document ID list", }, }, - ArgsUsage: "[key] [value]", - Action: execDeleteValue, + ArgsUsage: "[document IDs]", + Action: distributorGet, }, { - Name: "document", - Usage: "delete document", + Name: "index", + Usage: 
"Index document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Document list", + }, + cli.BoolFlag{ + Name: "bulk", + Usage: "Bulk indexing", }, }, - ArgsUsage: "[id] ...", - Action: execDeleteDocument, + ArgsUsage: "[document ID] [document fields]", + Action: distributorIndex, }, - }, - }, - { - Name: "watch", - Usage: "watch", - Subcommands: []cli.Command{ { - Name: "cluster", - Usage: "watch cluster", + Name: "delete", + Usage: "Delete document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Document ID list", }, }, - Action: execWatchCluster, + ArgsUsage: "[document IDs]", + Action: distributorDelete, }, { - Name: "store", - Usage: "watch store", + Name: "search", + Usage: "Search document(s)", Flags: []cli.Flag{ cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "file", + Value: "", + Usage: "Search request", }, }, - ArgsUsage: "[key]", - Action: execWatchStore, - }, - }, - }, - { - Name: "snapshot", - Usage: "snapshot data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", - }, - }, - Action: execSnapshot, - }, - { - Name: "search", - Usage: "search documents", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "address to connect to", + ArgsUsage: "[search request]", + Action: distributorSearch, }, }, - ArgsUsage: "[search request]", - Action: execSearch, }, } diff --git a/cmd/blast/readinessprobe.go 
b/cmd/blast/readinessprobe.go deleted file mode 100644 index 36bf970..0000000 --- a/cmd/blast/readinessprobe.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execReadinessProbe(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - client, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - state, err := client.ReadinessProbe() - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) - - return nil -} diff --git a/cmd/blast/set_node.go b/cmd/blast/set_node.go deleted file mode 100644 index a32052a..0000000 --- a/cmd/blast/set_node.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/mosuka/blast/grpc" - "github.com/urfave/cli" -) - -func execSetNode(c *cli.Context) error { - grpcAddr := c.String("grpc-addr") - - nodeId := c.Args().Get(0) - - metadataStr := c.Args().Get(1) - - var metadata map[string]interface{} - err := json.Unmarshal([]byte(metadataStr), &metadata) - if err != nil { - return err - } - - peerClient, err := grpc.NewClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := peerClient.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - err = peerClient.SetNode(nodeId, metadata) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/blastd/main.go b/cmd/blastd/main.go deleted file mode 100644 index e69e527..0000000 --- a/cmd/blastd/main.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - "path" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/version" - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Usage = "blastd" - app.Version = version.Version - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "INFO", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-access-log-file", - Value: os.Stderr.Name(), - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-access-log-max-size", - Value: 500, - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-access-log-max-backups", - Value: 3, - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-access-log-max-age", - Value: 30, - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-access-log-compress", - 
Usage: "Compress a HTTP access log", - }, - } - app.Authors = []cli.Author{ - { - Name: "mosuka", - Email: "minoru.osuka@gmail.com", - }, - } - app.Commands = []cli.Command{ - { - Name: "indexer", - Usage: "Start indexer", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-addr", - Value: "", - Usage: "The gRPC address of the manager node that exists in the federation to be joined", - }, - cli.StringFlag{ - Name: "cluster-id", - Value: "default", - Usage: "Cluster ID", - }, - cli.StringFlag{ - Name: "node-id", - Value: "indexer1", - Usage: "Node ID", - }, - cli.StringFlag{ - Name: "bind-addr", - Value: ":5000", - Usage: "Raft bind address", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: ":5001", - Usage: "gRPC Server listen address", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":5002", - Usage: "HTTP server listen address", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast-index", - Usage: "Data directory", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - Usage: "Raft log storage type to use", - }, - cli.StringFlag{ - Name: "peer-addr", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - Usage: "Path to a file containing a JSON representation of an index mapping to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - Usage: "Index storage type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - Usage: "Index storage type to use", - }, - }, - Action: startIndexer, - }, - { - Name: "manager", - Usage: "Start manager", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "Node ID", - }, - cli.StringFlag{ - Name: "bind-addr", - Value: ":15000", - Usage: "The address that should be used to for internal cluster communications", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: 
":15001", - Usage: "The address that should be used to for client communications over gRPC", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":15002", - Usage: "The address that should be used to for client communications over HTTP", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "./", - Usage: "Data directory", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - Usage: "Raft log storage type to use", - }, - cli.StringFlag{ - Name: "peer-addr", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - Usage: "Path to a file containing a JSON representation of an index mapping to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - Usage: "Index storage type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - Usage: "Index storage type to use", - }, - }, - Action: startManager, - }, - { - Name: "dispatcher", - Usage: "Start dispatcher", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-addr", - Value: ":15001", - Usage: "Manager address", - }, - cli.StringFlag{ - Name: "grpc-addr", - Value: ":25001", - Usage: "gRPC Server listen address", - }, - cli.StringFlag{ - Name: "http-addr", - Value: ":25002", - Usage: "HTTP server listen address", - }, - }, - Action: startDispatcher, - }, - } - - cli.HelpFlag = cli.BoolFlag{ - Name: "help, h", - Usage: "Show this message", - } - cli.VersionFlag = cli.BoolFlag{ - Name: "version, v", - Usage: "Print the version", - } - - err := app.Run(os.Args) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } -} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index f69170c..770bbd9 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -25,6 +25,8 @@ import ( "sync" "time" + "github.com/mosuka/blast/indexutils" + "github.com/blevesearch/bleve" 
"github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" @@ -782,9 +784,9 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e } // initialize document list for each cluster - docSet := make(map[string][]map[string]interface{}, 0) + docSet := make(map[string][]*indexutils.Document, 0) for _, clusterId := range clusterIds { - docSet[clusterId] = make([]map[string]interface{}, 0) + docSet[clusterId] = make([]*indexutils.Document, 0) } for { @@ -807,9 +809,10 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e fields := *ins.(*map[string]interface{}) // document - doc := map[string]interface{}{ - "id": req.Id, - "fields": fields, + doc, err := indexutils.NewDocument(req.Id, fields) + if err != nil { + s.logger.Error(err.Error()) + return status.Error(codes.Internal, err.Error()) } // distribute documents to each cluster based on document id @@ -831,7 +834,7 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e wg := &sync.WaitGroup{} for clusterId, docs := range docSet { wg.Add(1) - go func(clusterId string, docs []map[string]interface{}, respChan chan respVal) { + go func(clusterId string, docs []*indexutils.Document, respChan chan respVal) { count, err := indexerClients[clusterId].IndexDocument(docs) wg.Done() respChan <- respVal{ diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index 8ce7744..08b0484 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -15,11 +15,16 @@ package dispatcher import ( + "bufio" "encoding/json" + "io" "io/ioutil" "net/http" + "strings" "time" + "github.com/mosuka/blast/indexutils" + "github.com/blevesearch/bleve" "github.com/gorilla/mux" "github.com/mosuka/blast/errors" @@ -166,11 +171,20 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer blasthttp.RecordMetrics(start, status, w, r) // create documents - docs := make([]map[string]interface{}, 0) + 
docs := make([]*indexutils.Document, 0) vars := mux.Vars(r) id := vars["id"] + bulk := func(values []string) bool { + for _, value := range values { + if strings.ToLower(value) == "true" { + return true + } + } + return false + }(r.URL.Query()["bulk"]) + bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { status = http.StatusInternalServerError @@ -190,8 +204,95 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if id == "" { - // Indexing documents in bulk - err := json.Unmarshal(bodyBytes, &docs) + if bulk { + s := strings.NewReader(string(bodyBytes)) + reader := bufio.NewReader(s) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + docs = append(docs, doc) + } + break + } + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + docs = append(docs, doc) + } + } + } else { + doc, err := indexutils.NewDocumentFromBytes(bodyBytes) + if err != nil { + 
status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + docs = append(docs, doc) + } + } else { + var fields map[string]interface{} + err = json.Unmarshal(bodyBytes, &fields) if err != nil { status = http.StatusBadRequest @@ -208,10 +309,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } - } else { - // Indexing a document - var fields map[string]interface{} - err := json.Unmarshal(bodyBytes, &fields) + + doc, err := indexutils.NewDocument(id, fields) if err != nil { status = http.StatusBadRequest @@ -229,11 +328,6 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - docs = append(docs, doc) } @@ -325,23 +419,36 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if id == "" { - // Deleting documents in bulk - err := json.Unmarshal(bodyBytes, &ids) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) + s := strings.NewReader(string(bodyBytes)) + reader := bufio.NewReader(s) + for { + docId, err := reader.ReadString('\n') if err != nil { - h.logger.Error(err.Error()) + if err == io.EOF || err == io.ErrClosedPipe { + if docId == "" { + ids = append(ids, docId) + } + break + } + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return } - 
blasthttp.WriteResponse(w, content, status, h.logger) - return + if docId == "" { + ids = append(ids, docId) + } } } else { // Deleting a document diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 58c69cc..bbe660d 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -34,7 +34,7 @@ func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) diff --git a/example/geo_doc1.json b/example/geo_doc1.json deleted file mode 100644 index e94b319..0000000 --- a/example/geo_doc1.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "Brewpub-on-the-Green", - "city": "Fremont", - "state": "California", - "code": "", - "country": "United States", - "phone": "", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [], - "geo": { - "accuracy": "APPROXIMATE", - "lat": 37.5483, - "lon": -121.989 - } -} diff --git a/example/geo_doc2.json b/example/geo_doc2.json deleted file mode 100644 index 9ba8bfd..0000000 --- a/example/geo_doc2.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Capital City Brewing Company", - "city": "Washington", - "state": "District of Columbia", - "code": "20005", - "country": "United States", - "phone": "202.628.2222", - "website": "http://www.capcitybrew.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. 
Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.", - "address": [ - "1100 New York Ave, NW" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.8999, - "lon": -77.0272 - } -} diff --git a/example/geo_doc3.json b/example/geo_doc3.json deleted file mode 100644 index 008a467..0000000 --- a/example/geo_doc3.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Firehouse Grill & Brewery", - "city": "Sunnyvale", - "state": "California", - "code": "94086", - "country": "United States", - "phone": "1-408-773-9500", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "111 South Murphy Avenue" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 37.3775, - "lon": -122.03 - } -} diff --git a/example/geo_doc4.json b/example/geo_doc4.json deleted file mode 100644 index a9655d4..0000000 --- a/example/geo_doc4.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Hook & Ladder Brewing Company", - "city": "Silver Spring", - "state": "Maryland", - "code": "20910", - "country": "United States", - "phone": "301.565.4522", - "website": "http://www.hookandladderbeer.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.", - "address": [ - "8113 Fenton St." 
- ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.9911, - "lon": -77.0237 - } -} diff --git a/example/geo_doc5.json b/example/geo_doc5.json deleted file mode 100644 index 24e07b0..0000000 --- a/example/geo_doc5.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Jack's Brewing", - "city": "Fremont", - "state": "California", - "code": "94538", - "country": "United States", - "phone": "1-510-796-2036", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "39176 Argonaut Way" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 37.5441, - "lon": -121.988 - } -} diff --git a/example/geo_doc6.json b/example/geo_doc6.json deleted file mode 100644 index 3c24f34..0000000 --- a/example/geo_doc6.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "Sweet Water Tavern and Brewery", - "city": "Sterling", - "state": "Virginia", - "code": "20121", - "country": "United States", - "phone": "(703) 449-1108", - "website": "http://www.greatamericanrestaurants.com/sweetMainSter/index.htm", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "45980 Waterview Plaza" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 39.0324, - "lon": -77.4097 - } -} diff --git a/example/geo_doc_1.json b/example/geo_doc_1.json new file mode 100644 index 0000000..9cbc825 --- /dev/null +++ b/example/geo_doc_1.json @@ -0,0 +1,21 @@ +{ + "id": "1", + "fields": { + "name": "Brewpub-on-the-Green", + "city": "Fremont", + "state": "California", + "code": "", + "country": "United States", + "phone": "", + "website": "", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "", + "address": [], + "geo": { + "accuracy": "APPROXIMATE", + "lat": 37.5483, + "lon": -121.989 + } + } +} diff --git a/example/geo_doc_2.json b/example/geo_doc_2.json new file mode 100644 index 0000000..0ca3e13 --- /dev/null +++ b/example/geo_doc_2.json @@ -0,0 +1,23 @@ +{ + "id": "2", + "fields": { + "name": "Capital 
City Brewing Company", + "city": "Washington", + "state": "District of Columbia", + "code": "20005", + "country": "United States", + "phone": "202.628.2222", + "website": "http://www.capcitybrew.com", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.", + "address": [ + "1100 New York Ave, NW" + ], + "geo": { + "accuracy": "ROOFTOP", + "lat": 38.8999, + "lon": -77.0272 + } + } +} diff --git a/example/geo_doc_3.json b/example/geo_doc_3.json new file mode 100644 index 0000000..98c79c5 --- /dev/null +++ b/example/geo_doc_3.json @@ -0,0 +1,23 @@ +{ + "id": "3", + "fields": { + "name": "Firehouse Grill & Brewery", + "city": "Sunnyvale", + "state": "California", + "code": "94086", + "country": "United States", + "phone": "1-408-773-9500", + "website": "", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "", + "address": [ + "111 South Murphy Avenue" + ], + "geo": { + "accuracy": "RANGE_INTERPOLATED", + "lat": 37.3775, + "lon": -122.03 + } + } +} diff --git a/example/geo_doc_4.json b/example/geo_doc_4.json new file mode 100644 index 0000000..fcdc08a --- /dev/null +++ b/example/geo_doc_4.json @@ -0,0 +1,23 @@ +{ + "id": "4", + "fields": { + "name": "Hook & Ladder Brewing Company", + "city": "Silver Spring", + "state": "Maryland", + "code": "20910", + "country": "United States", + "phone": "301.565.4522", + "website": "http://www.hookandladderbeer.com", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink 
responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.", + "address": [ + "8113 Fenton St." + ], + "geo": { + "accuracy": "ROOFTOP", + "lat": 38.9911, + "lon": -77.0237 + } + } +} diff --git a/example/geo_doc_5.json b/example/geo_doc_5.json new file mode 100644 index 0000000..e2e6807 --- /dev/null +++ b/example/geo_doc_5.json @@ -0,0 +1,23 @@ +{ + "id": "5", + "fields": { + "name": "Jack's Brewing", + "city": "Fremont", + "state": "California", + "code": "94538", + "country": "United States", + "phone": "1-510-796-2036", + "website": "", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "", + "address": [ + "39176 Argonaut Way" + ], + "geo": { + "accuracy": "ROOFTOP", + "lat": 37.5441, + "lon": -121.988 + } + } +} diff --git a/example/geo_doc_6.json b/example/geo_doc_6.json new file mode 100644 index 0000000..8ecc9bb --- /dev/null +++ b/example/geo_doc_6.json @@ -0,0 +1,23 @@ +{ + "id": "6", + "fields": { + "name": "Sweet Water Tavern and Brewery", + "city": "Sterling", + "state": "Virginia", + "code": "20121", + "country": "United States", + "phone": "(703) 449-1108", + "website": "http://www.greatamericanrestaurants.com/sweetMainSter/index.htm", + "type": "brewery", + "updated": "2010-07-22 20:00:20", + "description": "", + "address": [ + "45980 Waterview Plaza" + ], + "geo": { + "accuracy": "RANGE_INTERPOLATED", + "lat": 39.0324, + "lon": -77.4097 + } + } +} diff --git a/example/wiki_bulk_delete.json b/example/wiki_bulk_delete.json deleted file mode 100644 index 3f17c76..0000000 --- a/example/wiki_bulk_delete.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - "arwiki_1", - "bgwiki_1", - "cawiki_1", - "zhwiki_1" -] diff --git 
a/example/wiki_bulk_delete.txt b/example/wiki_bulk_delete.txt new file mode 100644 index 0000000..6f7ddd9 --- /dev/null +++ b/example/wiki_bulk_delete.txt @@ -0,0 +1,4 @@ +arwiki_1 +bgwiki_1 +cawiki_1 +zhwiki_1 diff --git a/example/wiki_bulk_index.json b/example/wiki_bulk_index.json deleted file mode 100644 index 42af51b..0000000 --- a/example/wiki_bulk_index.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "id": "arwiki_1", - "fields": { - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" - } - }, - { - "id": "bgwiki_1", - "fields": { - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. 
Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" - } - }, - { - "id": "cawiki_1", - "fields": { - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" - } - }, - { - "id": "zhwiki_1", - "fields": { - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" - } - } -] diff --git a/example/wiki_bulk_index.txt b/example/wiki_bulk_index.txt new file mode 100644 index 0000000..32a0fbd --- /dev/null +++ b/example/wiki_bulk_index.txt @@ -0,0 +1,36 @@ +{"id":"arwiki_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. 
تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"arwiki"}} +{"id":"bgwiki_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bgwiki"}} +{"id":"cawiki_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. 
El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"cawiki"}} +{"id":"cswiki_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cswiki"}} +{"id":"dawiki_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"dawiki"}} +{"id":"dewiki_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. 
Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"dewiki"}} +{"id":"elwiki_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"elwiki"}} +{"id":"enwiki_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"enwiki"}} +{"id":"eswiki_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"eswiki"}} +{"id":"fawiki_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fawiki"}} +{"id":"fiwiki_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. 
Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fiwiki"}} +{"id":"frwiki_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. 
On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"frwiki"}} +{"id":"gawiki_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"gawiki"}} +{"id":"glwiki_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"glwiki"}} +{"id":"guwiki_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. 
શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"guwiki"}} +{"id":"hiwiki_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hiwiki"}} +{"id":"huwiki_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"huwiki"}} +{"id":"hywiki_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս 
հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hywiki"}} +{"id":"idwiki_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"idwiki"}} +{"id":"itwiki_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"itwiki"}} +{"id":"jawiki_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"jawiki"}} +{"id":"knwiki_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"knwiki"}} +{"id":"kowiki_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"kowiki"}} +{"id":"mlwiki_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"mlwiki"}} +{"id":"nlwiki_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nlwiki"}} +{"id":"nowiki_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"nowiki"}} +{"id":"pswiki_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"pswiki"}} +{"id":"ptwiki_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"ptwiki"}} +{"id":"rowiki_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"rowiki"}} +{"id":"ruwiki_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ruwiki"}} +{"id":"svwiki_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"svwiki"}} +{"id":"tawiki_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"tawiki"}} +{"id":"tewiki_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"tewiki"}} +{"id":"thwiki_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"thwiki"}} +{"id":"trwiki_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"trwiki"}} +{"id":"zhwiki_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zhwiki"}} diff --git a/example/wiki_doc_arwiki_1.json b/example/wiki_doc_arwiki_1.json index 8b766f0..fdbdac0 100644 --- a/example/wiki_doc_arwiki_1.json +++ b/example/wiki_doc_arwiki_1.json @@ -1,6 +1,9 @@ { - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. 
تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" + "id": "arwiki_1", + "fields": { + "title_ar": "محرك بحث", + "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", + "timestamp": "2018-03-25T18:04:00Z", + "_type": "arwiki" + } } diff --git a/example/wiki_doc_bgwiki_1.json b/example/wiki_doc_bgwiki_1.json index 0b585be..3ad2735 100644 --- a/example/wiki_doc_bgwiki_1.json +++ b/example/wiki_doc_bgwiki_1.json @@ -1,6 +1,9 @@ { - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. 
Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" + "id": "bgwiki_1", + "fields": { + "title_bg": "Търсачка", + "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.", + "timestamp": "2018-07-11T11:03:00Z", + "_type": "bgwiki" + } } diff --git a/example/wiki_doc_cawiki_1.json b/example/wiki_doc_cawiki_1.json index 119c247..ffb67e6 100644 --- a/example/wiki_doc_cawiki_1.json +++ b/example/wiki_doc_cawiki_1.json @@ -1,6 +1,9 @@ { - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. 
A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" + "id": "cawiki_1", + "fields": { + "title_ca": "Motor de cerca", + "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.", + "timestamp": "2018-07-09T18:07:00Z", + "_type": "cawiki" + } } diff --git a/example/wiki_doc_cswiki_1.json b/example/wiki_doc_cswiki_1.json index 1f222ef..89c994a 100644 --- a/example/wiki_doc_cswiki_1.json +++ b/example/wiki_doc_cswiki_1.json @@ -1,6 +1,9 @@ { - "title_cs": "Vyhledávač", - "text_cs": "Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. 
Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.", - "timestamp": "2017-11-10T21:59:00Z", - "_type": "cswiki" + "id": "cswiki_1", + "fields": { + "title_cs": "Vyhledávač", + "text_cs": "Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.", + "timestamp": "2017-11-10T21:59:00Z", + "_type": "cswiki" + } } diff --git a/example/wiki_doc_dawiki_1.json b/example/wiki_doc_dawiki_1.json index e38abbb..ff1ee22 100644 --- a/example/wiki_doc_dawiki_1.json +++ b/example/wiki_doc_dawiki_1.json @@ -1,6 +1,9 @@ { - "title_da": "Søgemaskine", - "text_da": "En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dawiki" + "id": "dawiki_1", + "fields": { + "title_da": "Søgemaskine", + "text_da": "En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.", + "timestamp": "2017-09-04T01:54:00Z", + "_type": "dawiki" + } } diff --git a/example/wiki_doc_dewiki_1.json b/example/wiki_doc_dewiki_1.json index ffeb346..c5f0a83 100644 --- a/example/wiki_doc_dewiki_1.json +++ b/example/wiki_doc_dewiki_1.json @@ -1,6 +1,9 @@ { - "title_de": "Suchmaschine", - "text_de": "Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dewiki" + "id": "dewiki_1", + "fields": { + "title_de": "Suchmaschine", + "text_de": "Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. 
Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.", + "timestamp": "2017-09-04T01:54:00Z", + "_type": "dewiki" + } } diff --git a/example/wiki_doc_elwiki_1.json b/example/wiki_doc_elwiki_1.json index b4eb58e..42f143b 100644 --- a/example/wiki_doc_elwiki_1.json +++ b/example/wiki_doc_elwiki_1.json @@ -1,6 +1,9 @@ { - "title_el": "Μηχανή αναζήτησης", - "text_el": "Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.", - "timestamp": "2017-11-21T19:57:00Z", - "_type": "elwiki" + "id": "elwiki_1", + "fields": { + "title_el": "Μηχανή αναζήτησης", + "text_el": "Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. 
Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.", + "timestamp": "2017-11-21T19:57:00Z", + "_type": "elwiki" + } } diff --git a/example/wiki_doc_enwiki_1.json b/example/wiki_doc_enwiki_1.json index 0173803..bcb7d18 100644 --- a/example/wiki_doc_enwiki_1.json +++ b/example/wiki_doc_enwiki_1.json @@ -1,6 +1,9 @@ { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "id": "enwiki_1", + "fields": { + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" + } } diff --git a/example/wiki_doc_eswiki_1.json b/example/wiki_doc_eswiki_1.json index d1747f8..5d3c7aa 100644 --- a/example/wiki_doc_eswiki_1.json +++ b/example/wiki_doc_eswiki_1.json @@ -1,6 +1,9 @@ { - "title_es": "Motor de búsqueda", - "text_es": "Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.", - "timestamp": "2018-08-30T11:30:00Z", - "_type": "eswiki" + "id": "eswiki_1", + "fields": { + "title_es": "Motor de búsqueda", + "text_es": "Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. 
Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.", + "timestamp": "2018-08-30T11:30:00Z", + "_type": "eswiki" + } } diff --git a/example/wiki_doc_fawiki_1.json b/example/wiki_doc_fawiki_1.json index 1457b00..093cc83 100644 --- a/example/wiki_doc_fawiki_1.json +++ b/example/wiki_doc_fawiki_1.json @@ -1,6 +1,9 @@ { - "title_fa": "موتور جستجو (پردازش)", - "text_fa": "موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.", - "timestamp": "2017-01-06T02:46:00Z", - "_type": "fawiki" + "id": "fawiki_1", + "fields": { + "title_fa": "موتور جستجو (پردازش)", + "text_fa": "موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! 
جستجو اشاره کرد.", + "timestamp": "2017-01-06T02:46:00Z", + "_type": "fawiki" + } } diff --git a/example/wiki_doc_fiwiki_1.json b/example/wiki_doc_fiwiki_1.json index 78d6861..e930816 100644 --- a/example/wiki_doc_fiwiki_1.json +++ b/example/wiki_doc_fiwiki_1.json @@ -1,6 +1,9 @@ { - "title_fi": "Hakukone", - "text_fi": "Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.", - "timestamp": "2017-10-04T14:33:00Z", - "_type": "fiwiki" + "id": "fiwiki_1", + "fields": { + "title_fi": "Hakukone", + "text_fi": "Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.", + "timestamp": "2017-10-04T14:33:00Z", + "_type": "fiwiki" + } } diff --git a/example/wiki_doc_frwiki_1.json b/example/wiki_doc_frwiki_1.json index f90c893..4090cd0 100644 --- a/example/wiki_doc_frwiki_1.json +++ b/example/wiki_doc_frwiki_1.json @@ -1,6 +1,9 @@ { - "title_fr": "Moteur de recherche", - "text_fr": "Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. 
Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.", - "timestamp": "2018-05-30T15:15:00Z", - "_type": "frwiki" + "id": "frwiki_1", + "fields": { + "title_fr": "Moteur de recherche", + "text_fr": "Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. 
Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.", + "timestamp": "2018-05-30T15:15:00Z", + "_type": "frwiki" + } } diff --git a/example/wiki_doc_gawiki_1.json b/example/wiki_doc_gawiki_1.json index 492dc58..ad69390 100644 --- a/example/wiki_doc_gawiki_1.json +++ b/example/wiki_doc_gawiki_1.json @@ -1,6 +1,9 @@ { - "title_ga": "Inneall cuardaigh", - "text_ga": "Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. 
Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.", - "timestamp": "2013-10-27T18:17:00Z", - "_type": "gawiki" + "id": "gawiki_1", + "fields": { + "title_ga": "Inneall cuardaigh", + "text_ga": "Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.", + "timestamp": "2013-10-27T18:17:00Z", + "_type": "gawiki" + } } diff --git a/example/wiki_doc_glwiki_1.json b/example/wiki_doc_glwiki_1.json index 8d1e981..667e187 100644 --- a/example/wiki_doc_glwiki_1.json +++ b/example/wiki_doc_glwiki_1.json @@ -1,6 +1,9 @@ { - "title_gl": "Motor de busca", - "text_gl": "Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. 
As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.", - "timestamp": "2016-10-31T13:33:00Z", - "_type": "glwiki" + "id": "glwiki_1", + "fields": { + "title_gl": "Motor de busca", + "text_gl": "Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.", + "timestamp": "2016-10-31T13:33:00Z", + "_type": "glwiki" + } } diff --git a/example/wiki_doc_guwiki_1.json b/example/wiki_doc_guwiki_1.json index eb0cffd..a0afc9b 100644 --- a/example/wiki_doc_guwiki_1.json +++ b/example/wiki_doc_guwiki_1.json @@ -1,6 +1,9 @@ { - "title_gu": "વેબ શોધ એન્જીન", - "text_gu": "વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.", - "timestamp": "2013-04-04T19:28:00Z", - "_type": "guwiki" + "id": "guwiki_1", + "fields": { + "title_gu": "વેબ શોધ એન્જીન", + "text_gu": "વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. 
શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.", + "timestamp": "2013-04-04T19:28:00Z", + "_type": "guwiki" + } } diff --git a/example/wiki_doc_hiwiki_1.json b/example/wiki_doc_hiwiki_1.json index 59456f8..494a176 100644 --- a/example/wiki_doc_hiwiki_1.json +++ b/example/wiki_doc_hiwiki_1.json @@ -1,6 +1,9 @@ { - "title_hi": "खोज इंजन", - "text_hi": "ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।", - "timestamp": "2017-10-19T20:09:00Z", - "_type": "hiwiki" + "id": "hiwiki_1", + "fields": { + "title_hi": "खोज इंजन", + "text_hi": "ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।", + "timestamp": "2017-10-19T20:09:00Z", + "_type": "hiwiki" + } } diff --git 
a/example/wiki_doc_huwiki_1.json b/example/wiki_doc_huwiki_1.json index d2595ec..95f97a0 100644 --- a/example/wiki_doc_huwiki_1.json +++ b/example/wiki_doc_huwiki_1.json @@ -1,6 +1,9 @@ { - "title_hu": "Keresőmotor", - "text_hu": "A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.", - "timestamp": "2018-05-15T20:40:00Z", - "_type": "huwiki" + "id": "huwiki_1", + "fields": { + "title_hu": "Keresőmotor", + "text_hu": "A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.", + "timestamp": "2018-05-15T20:40:00Z", + "_type": "huwiki" + } } diff --git a/example/wiki_doc_hywiki_1.json b/example/wiki_doc_hywiki_1.json index 4d9e1a2..0e36b1a 100644 --- a/example/wiki_doc_hywiki_1.json +++ b/example/wiki_doc_hywiki_1.json @@ -1,6 +1,9 @@ { - "title_hy": "Որոնողական համակարգ", - "text_hy": "Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու 
համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "hywiki" + "id": "hywiki_1", + "fields": { + "title_hy": "Որոնողական համակարգ", + "text_hy": "Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային 
տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։", + "timestamp": "2017-11-20T17:47:00Z", + "_type": "hywiki" + } } diff --git a/example/wiki_doc_idwiki_1.json b/example/wiki_doc_idwiki_1.json index 262ebeb..16e5802 100644 --- a/example/wiki_doc_idwiki_1.json +++ b/example/wiki_doc_idwiki_1.json @@ -1,6 +1,9 @@ { - "title_id": "Mesin pencari web", - "text_id": "Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. 
Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "idwiki" + "id": "idwiki_1", + "fields": { + "title_id": "Mesin pencari web", + "text_id": "Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.", + "timestamp": "2017-11-20T17:47:00Z", + "_type": "idwiki" + } } diff --git a/example/wiki_doc_itwiki_1.json b/example/wiki_doc_itwiki_1.json index c58fbfa..b8bdd5d 100644 --- a/example/wiki_doc_itwiki_1.json +++ b/example/wiki_doc_itwiki_1.json @@ -1,6 +1,9 @@ { - "title_it": "Motore di ricerca", - "text_it": "Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.", - "timestamp": "2018-07-16T12:20:00Z", - "_type": "itwiki" + "id": "itwiki_1", + "fields": { + "title_it": "Motore di ricerca", + "text_it": "Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. 
I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.", + "timestamp": "2018-07-16T12:20:00Z", + "_type": "itwiki" + } } diff --git a/example/wiki_doc_jawiki_1.json b/example/wiki_doc_jawiki_1.json index db74184..264ff02 100644 --- a/example/wiki_doc_jawiki_1.json +++ b/example/wiki_doc_jawiki_1.json @@ -1,6 +1,9 @@ { - "title_ja": "検索エンジン", - "text_ja": "検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。", - "timestamp": "2018-05-30T00:52:00Z", - "_type": "jawiki" + "id": "jawiki_1", + "fields": { + "title_ja": "検索エンジン", + "text_ja": "検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。", + "timestamp": "2018-05-30T00:52:00Z", + "_type": "jawiki" + } } diff --git a/example/wiki_doc_knwiki_1.json b/example/wiki_doc_knwiki_1.json index cdd3ac0..a24e9cc 100644 --- a/example/wiki_doc_knwiki_1.json +++ b/example/wiki_doc_knwiki_1.json @@ -1,6 +1,9 @@ { - "title_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ", - "text_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. 
ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.", - "timestamp": "2017-10-03T14:13:00Z", - "_type": "knwiki" + "id": "knwiki_1", + "fields": { + "title_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ", + "text_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.", + "timestamp": "2017-10-03T14:13:00Z", + "_type": "knwiki" + } } diff --git a/example/wiki_doc_kowiki_1.json b/example/wiki_doc_kowiki_1.json index 57ff513..3a612fe 100644 --- a/example/wiki_doc_kowiki_1.json +++ b/example/wiki_doc_kowiki_1.json @@ -1,6 +1,9 @@ { - "title_cjk": "검색 엔진", - "text_cjk": "검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.", - "timestamp": "2017-11-19T12:50:00Z", - "_type": "kowiki" + "id": "kowiki_1", + "fields": { + "title_cjk": "검색 엔진", + "text_cjk": "검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 
가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.", + "timestamp": "2017-11-19T12:50:00Z", + "_type": "kowiki" + } } diff --git a/example/wiki_doc_mlwiki_1.json b/example/wiki_doc_mlwiki_1.json index d172ff4..09c633b 100644 --- a/example/wiki_doc_mlwiki_1.json +++ b/example/wiki_doc_mlwiki_1.json @@ -1,6 +1,9 @@ { - "title_ml": "വെബ് സെർച്ച് എഞ്ചിൻ", - "text_ml": "വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.", - "timestamp": "2010-05-05T15:06:00Z", - "_type": "mlwiki" + "id": "mlwiki_1", + "fields": { + "title_ml": "വെബ് സെർച്ച് എഞ്ചിൻ", + "text_ml": "വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.", + "timestamp": "2010-05-05T15:06:00Z", + "_type": "mlwiki" + } } diff --git a/example/wiki_doc_nlwiki_1.json b/example/wiki_doc_nlwiki_1.json index d2ada6a..0b2a52f 100644 --- a/example/wiki_doc_nlwiki_1.json +++ b/example/wiki_doc_nlwiki_1.json @@ -1,6 +1,9 @@ { - "title_nl": "Zoekmachine", - "text_nl": "Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. 
In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.", - "timestamp": "2018-05-07T11:05:00Z", - "_type": "nlwiki" + "id": "nlwiki_1", + "fields": { + "title_nl": "Zoekmachine", + "text_nl": "Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.", + "timestamp": "2018-05-07T11:05:00Z", + "_type": "nlwiki" + } } diff --git a/example/wiki_doc_nowiki_1.json b/example/wiki_doc_nowiki_1.json index 0b01a24..39d5a35 100644 --- a/example/wiki_doc_nowiki_1.json +++ b/example/wiki_doc_nowiki_1.json @@ -1,6 +1,9 @@ { - "title_no": "Søkemotor", - "text_no": "En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. 
Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.", - "timestamp": "2018-02-05T14:15:00Z", - "_type": "nowiki" + "id": "nowiki_1", + "fields": { + "title_no": "Søkemotor", + "text_no": "En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.", + "timestamp": "2018-02-05T14:15:00Z", + "_type": "nowiki" + } } diff --git a/example/wiki_doc_pswiki_1.json b/example/wiki_doc_pswiki_1.json index b0ba67f..645fa9e 100644 --- a/example/wiki_doc_pswiki_1.json +++ b/example/wiki_doc_pswiki_1.json @@ -1,6 +1,9 @@ { - "title_ps": "انټرنټ لټوونکی ماشين", - "text_ps": "نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. 
حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. 
سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.", - "timestamp": "2015-12-15T18:53:00Z", - "_type": "pswiki" + "id": "pswiki_1", + "fields": { + "title_ps": "انټرنټ لټوونکی ماشين", + "text_ps": "نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. 
د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.", + "timestamp": "2015-12-15T18:53:00Z", + "_type": "pswiki" + } } diff --git a/example/wiki_doc_ptwiki_1.json b/example/wiki_doc_ptwiki_1.json index 8fb25c4..b79cbb6 100644 --- a/example/wiki_doc_ptwiki_1.json +++ b/example/wiki_doc_ptwiki_1.json @@ -1,6 +1,9 @@ { - "title_pt": "Motor de busca", - "text_pt": "Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. 
No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).", - "timestamp": "2017-11-09T14:38:00Z", - "_type": "ptwiki" + "id": "ptwiki_1", + "fields": { + "title_pt": "Motor de busca", + "text_pt": "Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. 
A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).", + "timestamp": "2017-11-09T14:38:00Z", + "_type": "ptwiki" + } } diff --git a/example/wiki_doc_rowiki_1.json b/example/wiki_doc_rowiki_1.json index ca80608..7562616 100644 --- a/example/wiki_doc_rowiki_1.json +++ b/example/wiki_doc_rowiki_1.json @@ -1,6 +1,9 @@ { - "title_ro": "Motor de căutare", - "text_ro": "Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).", - "timestamp": "2018-06-12T08:59:00Z", - "_type": "rowiki" + "id": "rowiki_1", + "fields": { + "title_ro": "Motor de căutare", + "text_ro": "Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. 
În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).", + "timestamp": "2018-06-12T08:59:00Z", + "_type": "rowiki" + } } diff --git a/example/wiki_doc_ruwiki_1.json b/example/wiki_doc_ruwiki_1.json index 3733d50..818b84f 100644 --- a/example/wiki_doc_ruwiki_1.json +++ b/example/wiki_doc_ruwiki_1.json @@ -1,6 +1,9 @@ { - "title_ru": "Поисковая машина", - "text_ru": "Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.", - "timestamp": "2017-03-22T01:16:00Z", - "_type": "ruwiki" + "id": "ruwiki_1", + "fields": { + "title_ru": "Поисковая машина", + "text_ru": "Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.", + "timestamp": "2017-03-22T01:16:00Z", + "_type": "ruwiki" + } } diff --git a/example/wiki_doc_svwiki_1.json b/example/wiki_doc_svwiki_1.json index 43f56cb..4c9210e 100644 --- a/example/wiki_doc_svwiki_1.json +++ b/example/wiki_doc_svwiki_1.json @@ -1,6 +1,9 @@ { - "title_sv": "Söktjänst", - "text_sv": "En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. 
Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.", - "timestamp": "2018-08-16T22:13:00Z", - "_type": "svwiki" + "id": "svwiki_1", + "fields": { + "title_sv": "Söktjänst", + "text_sv": "En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.", + "timestamp": "2018-08-16T22:13:00Z", + "_type": "svwiki" + } } diff --git a/example/wiki_doc_tawiki_1.json b/example/wiki_doc_tawiki_1.json index 5f46729..1b7e1aa 100644 --- a/example/wiki_doc_tawiki_1.json +++ b/example/wiki_doc_tawiki_1.json @@ -1,6 +1,9 @@ { - "title_ta": "தேடுபொறி", - "text_ta": "தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. 
தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.", - "timestamp": "2017-12-24T10:30:00Z", - "_type": "tawiki" + "id": "tawiki_1", + "fields": { + "title_ta": "தேடுபொறி", + "text_ta": "தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.", + "timestamp": "2017-12-24T10:30:00Z", + "_type": "tawiki" + } } diff --git a/example/wiki_doc_tewiki_1.json b/example/wiki_doc_tewiki_1.json index b014c8f..2cb70b5 100644 --- a/example/wiki_doc_tewiki_1.json +++ b/example/wiki_doc_tewiki_1.json @@ -1,6 +1,9 @@ { - "title_te": "వెబ్ శోధనా యంత్రం", - "text_te": "వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. 
శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.", - "timestamp": "2017-06-19T11:22:00Z", - "_type": "tewiki" + "id": "tewiki_1", + "fields": { + "title_te": "వెబ్ శోధనా యంత్రం", + "text_te": "వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.", + "timestamp": "2017-06-19T11:22:00Z", + "_type": "tewiki" + } } diff --git a/example/wiki_doc_thwiki_1.json b/example/wiki_doc_thwiki_1.json index 81a233f..9379367 100644 --- a/example/wiki_doc_thwiki_1.json +++ b/example/wiki_doc_thwiki_1.json @@ -1,6 +1,9 @@ { - "title_th": "เสิร์ชเอนจิน", - "text_th": "เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. 
เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป", - "timestamp": "2016-06-18T11:06:00Z", - "_type": "thwiki" + "id": "thwiki_1", + "fields": { + "title_th": "เสิร์ชเอนจิน", + "text_th": "เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป", + "timestamp": "2016-06-18T11:06:00Z", + "_type": "thwiki" + } } diff --git a/example/wiki_doc_trwiki_1.json b/example/wiki_doc_trwiki_1.json index bedbd13..14dace8 100644 --- a/example/wiki_doc_trwiki_1.json +++ b/example/wiki_doc_trwiki_1.json @@ -1,6 +1,9 @@ { - "title_tr": "Arama motoru", - "text_tr": "Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.", - "timestamp": "2018-03-13T17:37:00Z", - "_type": "trwiki" + "id": "trwiki_1", + "fields": { + "title_tr": "Arama motoru", + "text_tr": "Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.", + "timestamp": "2018-03-13T17:37:00Z", + "_type": "trwiki" + } } diff --git a/example/wiki_doc_zhwiki_1.json b/example/wiki_doc_zhwiki_1.json index f997795..98f1376 100644 --- a/example/wiki_doc_zhwiki_1.json +++ b/example/wiki_doc_zhwiki_1.json @@ -1,6 +1,9 @@ { - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" + "id": "zhwiki_1", + "fields": { + "title_zh": "搜索引擎", + "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", + "timestamp": "2018-08-27T05:47:00Z", + "_type": "zhwiki" + } } diff --git a/example/wiki_search_request_simple.json b/example/wiki_search_request_simple.json index 9ed3040..e4cac4d 100644 --- a/example/wiki_search_request_simple.json +++ b/example/wiki_search_request_simple.json @@ -1,6 +1,6 @@ { "query": { - "query": "+text_en:search" + "query": "+_all:search" }, "size": 10, "from": 0, diff --git a/grpc/client.go b/grpc/client.go index cadfd2f..2d87208 100644 --- a/grpc/client.go +++ b/grpc/client.go @@ -19,6 +19,8 @@ import ( "errors" "math" + "github.com/mosuka/blast/indexutils" + "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" @@ -350,7 +352,7 @@ func (c *Client) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOpt return searchResult, nil } -func (c *Client) IndexDocument(docs []map[string]interface{}, opts ...grpc.CallOption) (int, error) { +func (c *Client) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { stream, err := c.client.IndexDocument(c.ctx, opts...) 
if err != nil { st, _ := status.FromError(err) @@ -359,8 +361,8 @@ func (c *Client) IndexDocument(docs []map[string]interface{}, opts ...grpc.CallO } for _, doc := range docs { - id := doc["id"].(string) - fields := doc["fields"].(map[string]interface{}) + id := doc.Id + fields := doc.Fields fieldsAny := &any.Any{} err := protobuf.UnmarshalAny(&fields, fieldsAny) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 8cea720..f9a6772 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "github.com/mosuka/blast/indexutils" + "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" @@ -840,7 +842,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( } func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { - docs := make([]map[string]interface{}, 0) + docs := make([]*indexutils.Document, 0) for { req, err := stream.Recv() @@ -862,9 +864,10 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e fields := *ins.(*map[string]interface{}) // document - doc := map[string]interface{}{ - "id": req.Id, - "fields": fields, + doc, err := indexutils.NewDocument(req.Id, fields) + if err != nil { + s.logger.Error(err.Error()) + return status.Error(codes.Internal, err.Error()) } docs = append(docs, doc) diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 984143d..3ad4dea 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -15,16 +15,20 @@ package indexer import ( + "bufio" "encoding/json" + "io" "io/ioutil" "net/http" + "strings" "time" "github.com/blevesearch/bleve" "github.com/gorilla/mux" - "github.com/mosuka/blast/errors" + blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/grpc" blasthttp "github.com/mosuka/blast/http" + "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/version" 
"github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -106,7 +110,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fields, err := h.client.GetDocument(id) if err != nil { switch err { - case errors.ErrNotFound: + case blasterrors.ErrNotFound: status = http.StatusNotFound default: status = http.StatusInternalServerError @@ -127,7 +131,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // map[string]interface{} -> bytes - content, err = json.MarshalIndent(fields, "", " ") + content, err = blasthttp.NewJSONMessage(fields) if err != nil { status = http.StatusInternalServerError @@ -168,11 +172,20 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer blasthttp.RecordMetrics(start, status, w, r) // create documents - docs := make([]map[string]interface{}, 0) + docs := make([]*indexutils.Document, 0) vars := mux.Vars(r) id := vars["id"] + bulk := func(values []string) bool { + for _, value := range values { + if strings.ToLower(value) == "true" { + return true + } + } + return false + }(r.URL.Query()["bulk"]) + bodyBytes, err := ioutil.ReadAll(r.Body) if err != nil { status = http.StatusInternalServerError @@ -192,8 +205,95 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if id == "" { - // Indexing documents in bulk - err := json.Unmarshal(bodyBytes, &docs) + if bulk { + s := strings.NewReader(string(bodyBytes)) + reader := bufio.NewReader(s) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, 
h.logger) + return + } + docs = append(docs, doc) + } + break + } + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + + if len(docBytes) > 0 { + doc, err := indexutils.NewDocumentFromBytes(docBytes) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + docs = append(docs, doc) + } + } + } else { + doc, err := indexutils.NewDocumentFromBytes(bodyBytes) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } + docs = append(docs, doc) + } + } else { + var fields map[string]interface{} + err = json.Unmarshal(bodyBytes, &fields) if err != nil { status = http.StatusBadRequest @@ -210,10 +310,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } - } else { - // Indexing a document - var fields map[string]interface{} - err := json.Unmarshal(bodyBytes, &fields) + + doc, err := indexutils.NewDocument(id, fields) if err != nil { status = http.StatusBadRequest @@ -231,11 +329,6 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - docs = append(docs, doc) } @@ -327,23 +420,36 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, 
r *http.Request) { } if id == "" { - // Deleting documents in bulk - err := json.Unmarshal(bodyBytes, &ids) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) + s := strings.NewReader(string(bodyBytes)) + reader := bufio.NewReader(s) + for { + docId, err := reader.ReadString('\n') if err != nil { - h.logger.Error(err.Error()) + if err == io.EOF || err == io.ErrClosedPipe { + if docId != "" { + ids = append(ids, docId) + } + break + } + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return } - blasthttp.WriteResponse(w, content, status, h.logger) - return + if docId != "" { + ids = append(ids, docId) + } } } else { // Deleting a document diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 78d933f..6e1f511 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -23,6 +23,8 @@ import ( "path/filepath" "time" + "github.com/mosuka/blast/indexutils" + "github.com/blevesearch/bleve" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" @@ -229,6 +231,17 @@ func (s *RaftServer) Stop() error { return nil } +func (s *RaftServer) raftServers() ([]raft.Server, error) { + cf := s.raft.GetConfiguration() + err := cf.Error() + if err != nil { + s.logger.Error(err.Error()) + return nil, err + } + + return cf.Configuration().Servers, nil +} + func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() @@ -258,14 +271,13 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", err } - cf := s.raft.GetConfiguration() - err = cf.Error() + 
servers, err := s.raftServers() if err != nil { s.logger.Error(err.Error()) return "", err } - for _, server := range cf.Configuration().Servers { + for _, server := range servers { if server.Address == leaderAddr { return server.ID, nil } @@ -379,15 +391,14 @@ func (s *RaftServer) deleteNodeConfig(nodeId string) error { } func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() + servers, err := s.raftServers() if err != nil { s.logger.Error(err.Error()) return nil, err } node := make(map[string]interface{}, 0) - for _, server := range cf.Configuration().Servers { + for _, server := range servers { if server.ID == raft.ServerID(id) { nodeConfig, err := s.getNodeConfig(id) if err != nil { @@ -408,14 +419,13 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e return raft.ErrNotLeader } - cf := s.raft.GetConfiguration() - err := cf.Error() + servers, err := s.raftServers() if err != nil { s.logger.Error(err.Error()) return err } - for _, server := range cf.Configuration().Servers { + for _, server := range servers { if server.ID == raft.ServerID(nodeId) { s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) return nil @@ -453,15 +463,14 @@ func (s *RaftServer) DeleteNode(nodeId string) error { return raft.ErrNotLeader } - cf := s.raft.GetConfiguration() - err := cf.Error() + servers, err := s.raftServers() if err != nil { s.logger.Error(err.Error()) return err } // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { + for _, server := range servers { if server.ID == raft.ServerID(nodeId) { s.logger.Debug("remove server", zap.String("node_id", nodeId)) f := s.raft.RemoveServer(server.ID, 0, 0) @@ -484,15 +493,14 @@ func (s *RaftServer) DeleteNode(nodeId string) error { } func (s *RaftServer) GetCluster() (map[string]interface{}, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() + servers, err := 
s.raftServers() if err != nil { s.logger.Error(err.Error()) return nil, err } cluster := map[string]interface{}{} - for _, server := range cf.Configuration().Servers { + for _, server := range servers { node, err := s.GetNode(string(server.ID)) if err != nil { s.logger.Warn(err.Error()) @@ -535,7 +543,7 @@ func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, return result, nil } -func (s *RaftServer) IndexDocument(docs []map[string]interface{}) (int, error) { +func (s *RaftServer) IndexDocument(docs []*indexutils.Document) (int, error) { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader diff --git a/indexer/server_test.go b/indexer/server_test.go index 307ecc3..8a9f604 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -39,7 +39,7 @@ func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -67,7 +67,7 @@ func TestServer_Start(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -81,7 +81,7 @@ func TestServer_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -109,7 +109,7 @@ func TestServer_LivenessProbe(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -148,7 +148,7 @@ func TestServer_ReadinessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := 
logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -176,7 +176,7 @@ func TestServer_ReadinessProbe(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -215,7 +215,7 @@ func TestServer_GetNode(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -243,7 +243,7 @@ func TestServer_GetNode(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -269,7 +269,7 @@ func TestServer_GetNode(t *testing.T) { // get node node, err := client.GetNode(nodeConfig.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode := map[string]interface{}{ "node_config": nodeConfig.ToMap(), @@ -277,7 +277,7 @@ func TestServer_GetNode(t *testing.T) { } actNode := node if !reflect.DeepEqual(expNode, actNode) { - t.Errorf("expected content to see %v, saw %v", expNode, actNode) + t.Fatalf("expected content to see %v, saw %v", expNode, actNode) } } @@ -285,7 +285,7 @@ func TestServer_GetCluster(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -313,7 +313,7 @@ func TestServer_GetCluster(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -339,7 +339,7 @@ func TestServer_GetCluster(t *testing.T) { // get cluster cluster, err := client.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster := map[string]interface{}{ nodeConfig.NodeId: 
map[string]interface{}{ @@ -349,7 +349,7 @@ func TestServer_GetCluster(t *testing.T) { } actCluster := cluster if !reflect.DeepEqual(expCluster, actCluster) { - t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) + t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } } @@ -357,7 +357,7 @@ func TestServer_GetIndexMapping(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -385,7 +385,7 @@ func TestServer_GetIndexMapping(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -424,7 +424,7 @@ func TestServer_GetIndexMapping(t *testing.T) { } if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Errorf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) + t.Fatalf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) } } @@ -432,7 +432,7 @@ func TestServer_GetIndexType(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -460,7 +460,7 @@ func TestServer_GetIndexType(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -485,7 +485,7 @@ func TestServer_GetIndexType(t *testing.T) { expIndexType := indexConfig.IndexType if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } actIndexConfigMap, err := client.GetIndexConfig() @@ -496,7 +496,7 @@ func TestServer_GetIndexType(t *testing.T) { actIndexType := actIndexConfigMap["index_type"].(string) if !reflect.DeepEqual(expIndexType, actIndexType) { - t.Errorf("expected content to see %v, 
saw %v", expIndexType, actIndexType) + t.Fatalf("expected content to see %v, saw %v", expIndexType, actIndexType) } } @@ -504,7 +504,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -532,7 +532,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -557,7 +557,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { expIndexStorageType := indexConfig.IndexStorageType if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } actIndexConfigMap, err := client.GetIndexConfig() @@ -568,7 +568,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { actIndexStorageType := actIndexConfigMap["index_storage_type"].(string) if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { - t.Errorf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) + t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) } } @@ -576,7 +576,7 @@ func TestServer_GetIndexStats(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -604,7 +604,7 @@ func TestServer_GetIndexStats(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -649,7 +649,7 @@ func TestServer_GetIndexStats(t *testing.T) { } if !reflect.DeepEqual(expIndexStats, actIndexStats) { - t.Errorf("expected content to see %v, saw %v", expIndexStats, actIndexStats) + t.Fatalf("expected content to see %v, saw %v", expIndexStats, 
actIndexStats) } } @@ -657,7 +657,7 @@ func TestServer_PutDocument(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -685,7 +685,7 @@ func TestServer_PutDocument(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -709,40 +709,40 @@ func TestServer_PutDocument(t *testing.T) { } // put document - docs := make([]map[string]interface{}, 0) + docs := make([]*indexutils.Document, 0) docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file docFile1, err := os.Open(docPath1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { _ = docFile1.Close() }() docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } var docFields1 map[string]interface{} err = json.Unmarshal(docBytes1, &docFields1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - doc1 := map[string]interface{}{ - "id": "doc1", - "fields": docFields1, + doc1, err := indexutils.NewDocument("doc1", docFields1) + if err != nil { + t.Fatalf("%v", err) } docs = append(docs, doc1) count, err := client.IndexDocument(docs) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCount := 1 actCount := count if expCount != actCount { - t.Errorf("expected content to see %v, saw %v", expCount, actCount) + t.Fatalf("expected content to see %v, saw %v", expCount, actCount) } } @@ -750,7 +750,7 @@ func TestServer_GetDocument(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -778,7 +778,7 @@ func 
TestServer_GetDocument(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -802,60 +802,55 @@ func TestServer_GetDocument(t *testing.T) { } // put document - putDocs := make([]map[string]interface{}, 0) + putDocs := make([]*indexutils.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := os.Open(putDocPath1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { _ = putDocFile1.Close() }() putDocBytes1, err := ioutil.ReadAll(putDocFile1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) + putDoc1, err := indexutils.NewDocumentFromBytes(putDocBytes1) if err != nil { - t.Errorf("%v", err) - } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, + t.Fatalf("%v", err) } putDocs = append(putDocs, putDoc1) putCount, err := client.IndexDocument(putDocs) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expPutCount := 1 actPutCount := putCount if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) + t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) } // get document - getDocFields1, err := client.GetDocument("doc1") + getDocFields1, err := client.GetDocument("enwiki_1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expGetDocFields1 := putDocFields1 + expGetDocFields1 := putDoc1.Fields actGetDocFields1 := getDocFields1 if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Errorf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) + t.Fatalf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) } // get non-existing document getDocFields2, err := client.GetDocument("doc2") if err != errors.ErrNotFound { - t.Errorf("%v", err) + 
t.Fatalf("%v", err) } if getDocFields2 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields2) + t.Fatalf("expected content to see nil, saw %v", getDocFields2) } } @@ -863,7 +858,7 @@ func TestServer_DeleteDocument(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -891,7 +886,7 @@ func TestServer_DeleteDocument(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -915,89 +910,84 @@ func TestServer_DeleteDocument(t *testing.T) { } // put document - putDocs := make([]map[string]interface{}, 0) + putDocs := make([]*indexutils.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := os.Open(putDocPath1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { _ = putDocFile1.Close() }() putDocBytes1, err := ioutil.ReadAll(putDocFile1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) + putDoc1, err := indexutils.NewDocumentFromBytes(putDocBytes1) if err != nil { - t.Errorf("%v", err) - } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, + t.Fatalf("%v", err) } putDocs = append(putDocs, putDoc1) putCount, err := client.IndexDocument(putDocs) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expPutCount := 1 actPutCount := putCount if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) + t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) } // get document - getDocFields1, err := client.GetDocument("doc1") + getDocFields1, err := client.GetDocument("enwiki_1") 
if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expGetDocFields1 := putDocFields1 + expGetDocFields1 := putDoc1.Fields actGetDocFields1 := getDocFields1 if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Errorf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) + t.Fatalf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) } // get non-existing document getDocFields2, err := client.GetDocument("non-existing") if err != errors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if getDocFields2 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields2) + t.Fatalf("expected content to see nil, saw %v", getDocFields2) } // delete document - delCount, err := client.DeleteDocument([]string{"doc1"}) + delCount, err := client.DeleteDocument([]string{"enwiki_1"}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expDelCount := 1 actDelCount := delCount if expDelCount != actDelCount { - t.Errorf("expected content to see %v, saw %v", expDelCount, actDelCount) + t.Fatalf("expected content to see %v, saw %v", expDelCount, actDelCount) } // get document - getDocFields1, err = client.GetDocument("doc1") + getDocFields1, err = client.GetDocument("enwiki_1") if err != errors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if getDocFields1 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields1) + t.Fatalf("expected content to see nil, saw %v", getDocFields1) } // delete non-existing document getDocFields1, err = client.GetDocument("non-existing") if err != errors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if getDocFields1 != nil { - t.Errorf("expected content to see nil, saw %v", getDocFields1) + t.Fatalf("expected content to see nil, saw %v", getDocFields1) } } @@ -1005,7 +995,7 @@ func TestServer_Search(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + 
logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1033,7 +1023,7 @@ func TestServer_Search(t *testing.T) { server.Stop() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // start server @@ -1057,40 +1047,40 @@ func TestServer_Search(t *testing.T) { } // put document - putDocs := make([]map[string]interface{}, 0) + putDocs := make([]*indexutils.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := os.Open(putDocPath1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { _ = putDocFile1.Close() }() putDocBytes1, err := ioutil.ReadAll(putDocFile1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } var putDocFields1 map[string]interface{} err = json.Unmarshal(putDocBytes1, &putDocFields1) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - putDoc1 := map[string]interface{}{ - "id": "doc1", - "fields": putDocFields1, + putDoc1, err := indexutils.NewDocument("doc1", putDocFields1) + if err != nil { + t.Fatalf("%v", err) } putDocs = append(putDocs, putDoc1) putCount, err := client.IndexDocument(putDocs) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expPutCount := 1 actPutCount := putCount if expPutCount != actPutCount { - t.Errorf("expected content to see %v, saw %v", expPutCount, actPutCount) + t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) } // search @@ -1098,7 +1088,7 @@ func TestServer_Search(t *testing.T) { searchRequestFile, err := os.Open(searchRequestPath) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { _ = searchRequestFile.Close() @@ -1106,23 +1096,23 @@ func TestServer_Search(t *testing.T) { searchRequestByte, err := ioutil.ReadAll(searchRequestFile) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } searchRequest := 
bleve.NewSearchRequest(nil) err = json.Unmarshal(searchRequestByte, searchRequest) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } searchResult1, err := client.Search(searchRequest) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expTotal := uint64(1) actTotal := searchResult1.Total if expTotal != actTotal { - t.Errorf("expected content to see %v, saw %v", expTotal, actTotal) + t.Fatalf("expected content to see %v, saw %v", expTotal, actTotal) } } @@ -1130,7 +1120,7 @@ func TestCluster_Start(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1211,7 +1201,7 @@ func TestCluster_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1293,54 +1283,54 @@ func TestCluster_LivenessProbe(t *testing.T) { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // liveness check for server1 liveness1, err := client1.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() actLiveness1 := liveness1 if expLiveness1 != actLiveness1 { - t.Errorf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // 
liveness check for server2 liveness2, err := client2.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() actLiveness2 := liveness2 if expLiveness2 != actLiveness2 { - t.Errorf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // liveness check for server3 liveness3, err := client3.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() actLiveness3 := liveness3 if expLiveness3 != actLiveness3 { - t.Errorf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } } @@ -1348,7 +1338,7 @@ func TestCluster_ReadinessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1430,54 +1420,54 @@ func TestCluster_ReadinessProbe(t *testing.T) { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // readiness check for server1 readiness1, err := client1.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() actReadiness1 := readiness1 if expReadiness1 != actReadiness1 { - t.Errorf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + t.Fatalf("expected content to see %v, saw 
%v", expReadiness1, actReadiness1) } // readiness check for server2 readiness2, err := client2.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() actReadiness2 := readiness2 if expReadiness2 != actReadiness2 { - t.Errorf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // readiness check for server3 readiness3, err := client3.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() actReadiness3 := readiness3 if expReadiness3 != actReadiness3 { - t.Errorf("expected content to see %v, saw %v", expReadiness3, actReadiness3) + t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } } @@ -1485,7 +1475,7 @@ func TestCluster_GetNode(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1567,27 +1557,27 @@ func TestCluster_GetNode(t *testing.T) { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get all node info from all nodes node11, err := client1.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode11 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1595,12 +1585,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode11 := node11 if 
!reflect.DeepEqual(expNode11, actNode11) { - t.Errorf("expected content to see %v, saw %v", expNode11, actNode11) + t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } node12, err := client1.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode12 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1608,12 +1598,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode12 := node12 if !reflect.DeepEqual(expNode12, actNode12) { - t.Errorf("expected content to see %v, saw %v", expNode12, actNode12) + t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) } node13, err := client1.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode13 := map[string]interface{}{ "node_config": server3.nodeConfig.ToMap(), @@ -1621,12 +1611,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode13 := node13 if !reflect.DeepEqual(expNode13, actNode13) { - t.Errorf("expected content to see %v, saw %v", expNode13, actNode13) + t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) } node21, err := client2.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode21 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1634,12 +1624,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { - t.Errorf("expected content to see %v, saw %v", expNode21, actNode21) + t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } node22, err := client2.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode22 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1647,12 +1637,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode22 := node22 if !reflect.DeepEqual(expNode22, actNode22) { - t.Errorf("expected content to see %v, saw %v", expNode22, actNode22) + 
t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) } node23, err := client2.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode23 := map[string]interface{}{ "node_config": server3.nodeConfig.ToMap(), @@ -1660,12 +1650,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode23 := node23 if !reflect.DeepEqual(expNode23, actNode23) { - t.Errorf("expected content to see %v, saw %v", expNode23, actNode23) + t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) } node31, err := client3.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode31 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1673,12 +1663,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode31 := node31 if !reflect.DeepEqual(expNode31, actNode31) { - t.Errorf("expected content to see %v, saw %v", expNode31, actNode31) + t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } node32, err := client3.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode32 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1686,12 +1676,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode32 := node32 if !reflect.DeepEqual(expNode32, actNode32) { - t.Errorf("expected content to see %v, saw %v", expNode32, actNode32) + t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) } node33, err := client3.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode33 := map[string]interface{}{ "node_config": server3.nodeConfig.ToMap(), @@ -1699,7 +1689,7 @@ func TestCluster_GetNode(t *testing.T) { } actNode33 := node33 if !reflect.DeepEqual(expNode33, actNode33) { - t.Errorf("expected content to see %v, saw %v", expNode33, actNode33) + t.Fatalf("expected content to see %v, saw %v", expNode33, actNode33) } } @@ -1707,7 +1697,7 @@ func 
TestCluster_GetCluster(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1789,27 +1779,27 @@ func TestCluster_GetCluster(t *testing.T) { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get cluster info from all servers cluster1, err := client1.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster1 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1827,12 +1817,12 @@ func TestCluster_GetCluster(t *testing.T) { } actCluster1 := cluster1 if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Errorf("expected content to see %v, saw %v", expCluster1, actCluster1) + t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } cluster2, err := client2.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster2 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1850,12 +1840,12 @@ func TestCluster_GetCluster(t *testing.T) { } actCluster2 := cluster2 if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Errorf("expected content to see %v, saw %v", expCluster2, actCluster2) + t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } cluster3, err := client3.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster3 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1873,6 +1863,6 @@ func TestCluster_GetCluster(t *testing.T) { } 
actCluster3 := cluster3 if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Errorf("expected content to see %v, saw %v", expCluster3, actCluster3) + t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } } diff --git a/indexutils/document.go b/indexutils/document.go new file mode 100644 index 0000000..b550647 --- /dev/null +++ b/indexutils/document.go @@ -0,0 +1,65 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indexutils + +import ( + "encoding/json" + "errors" +) + +type Document struct { + Id string `json:"id,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` +} + +func NewDocument(id string, fields map[string]interface{}) (*Document, error) { + doc := &Document{ + Id: id, + Fields: fields, + } + + if err := doc.Validate(); err != nil { + return nil, err + } + + return doc, nil +} + +func NewDocumentFromBytes(src []byte) (*Document, error) { + var doc *Document + + err := json.Unmarshal(src, &doc) + if err != nil { + return nil, err + } + + if err := doc.Validate(); err != nil { + return nil, err + } + + return doc, nil +} + +func (d *Document) Validate() error { + if d.Id == "" { + return errors.New("id is empty") + } + + if len(d.Fields) <= 0 { + return errors.New("fields are empty") + } + + return nil +} diff --git a/manager/server_test.go b/manager/server_test.go index 77e8eff..bc6113f 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -35,7 
+35,7 @@ func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -80,7 +80,7 @@ func TestServer_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -150,7 +150,7 @@ func TestServer_ReadinessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -220,7 +220,7 @@ func TestServer_GetNode(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -293,7 +293,7 @@ func TestServer_GetCluster(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -368,7 +368,7 @@ func TestServer_GetIndexMapping(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -446,7 +446,7 @@ func TestServer_GetIndexType(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := 
logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -519,7 +519,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -592,7 +592,7 @@ func TestServer_SetState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -671,7 +671,7 @@ func TestServer_GetState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -750,7 +750,7 @@ func TestServer_DeleteState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -850,7 +850,7 @@ func TestCluster_Start(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -931,7 +931,7 @@ func TestCluster_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 
30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1068,7 +1068,7 @@ func TestCluster_ReadinessProbe(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1205,7 +1205,7 @@ func TestCluster_GetNode(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1427,7 +1427,7 @@ func TestCluster_GetCluster(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1601,7 +1601,7 @@ func TestCluster_GetState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1736,7 +1736,7 @@ func TestCluster_SetState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1945,7 +1945,7 @@ func TestCluster_DeleteState(t *testing.T) { curDir, _ := os.Getwd() // create logger - logger := logutils.NewLogger("DEBUG", "", 500, 3, 30, false) + logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, 
false) From 63d714cabe3c6a0d2d64965e64fbb2db840e553a Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 26 Jul 2019 20:42:13 +0900 Subject: [PATCH 03/76] Split protobuf into components (#84) --- cmd/blast/cluster_delete.go | 4 +- cmd/blast/cluster_get.go | 4 +- cmd/blast/cluster_node_health.go | 4 +- cmd/blast/cluster_node_info.go | 4 +- cmd/blast/cluster_node_leave.go | 4 +- cmd/blast/cluster_node_snapshot.go | 4 +- cmd/blast/cluster_peers_info.go | 4 +- cmd/blast/cluster_peers_watch.go | 6 +- cmd/blast/cluster_set.go | 4 +- cmd/blast/cluster_watch.go | 4 +- cmd/blast/distributor_delete.go | 4 +- cmd/blast/distributor_get.go | 4 +- cmd/blast/distributor_index.go | 4 +- cmd/blast/distributor_node_health.go | 4 +- cmd/blast/distributor_search.go | 4 +- cmd/blast/indexer_delete.go | 4 +- cmd/blast/indexer_get.go | 4 +- cmd/blast/indexer_index.go | 4 +- cmd/blast/indexer_node_health.go | 4 +- cmd/blast/indexer_node_info.go | 4 +- cmd/blast/indexer_node_leave.go | 4 +- cmd/blast/indexer_node_snapshot.go | 4 +- cmd/blast/indexer_peers_info.go | 4 +- cmd/blast/indexer_peers_watch.go | 4 +- cmd/blast/indexer_search.go | 4 +- dispatcher/grpc_client.go | 241 ++++ dispatcher/grpc_server.go | 94 ++ dispatcher/grpc_service.go | 82 +- dispatcher/http_handler.go | 47 +- dispatcher/http_server.go | 69 ++ dispatcher/server.go | 12 +- dispatcher/server_test.go | 7 +- grpc/service.go | 114 -- grpc/client.go => indexer/grpc_client.go | 211 +--- grpc/server.go => indexer/grpc_server.go | 18 +- indexer/grpc_service.go | 122 +- indexer/http_handler.go | 44 +- http/server.go => indexer/http_server.go | 12 +- indexer/index.go | 8 +- indexer/raft_fsm.go | 4 +- indexer/raft_server.go | 3 +- indexer/server.go | 21 +- indexer/server_test.go | 70 +- manager/grpc_client.go | 294 +++++ manager/grpc_server.go | 94 ++ manager/grpc_service.go | 121 +- manager/http_router.go | 40 +- http/router.go => manager/http_server.go | 46 +- manager/raft_fsm_test.go | 126 +- manager/server.go | 14 +- 
manager/server_test.go | 387 +++--- maputils/maputils_test.go | 126 +- protobuf/distribute/distribute.pb.go | 843 +++++++++++++ protobuf/distribute/distribute.proto | 85 ++ protobuf/{blast.pb.go => index/index.pb.go} | 983 ++++----------- protobuf/{blast.proto => index/index.proto} | 45 +- protobuf/management/management.pb.go | 1223 +++++++++++++++++++ protobuf/management/management.proto | 118 ++ protobuf/util_test.go | 68 +- 59 files changed, 4191 insertions(+), 1703 deletions(-) create mode 100644 dispatcher/grpc_client.go create mode 100644 dispatcher/grpc_server.go create mode 100644 dispatcher/http_server.go delete mode 100644 grpc/service.go rename grpc/client.go => indexer/grpc_client.go (64%) rename grpc/server.go => indexer/grpc_server.go (87%) rename http/server.go => indexer/http_server.go (82%) create mode 100644 manager/grpc_client.go create mode 100644 manager/grpc_server.go rename http/router.go => manager/http_server.go (52%) create mode 100644 protobuf/distribute/distribute.pb.go create mode 100644 protobuf/distribute/distribute.proto rename protobuf/{blast.pb.go => index/index.pb.go} (54%) rename protobuf/{blast.proto => index/index.proto} (79%) create mode 100644 protobuf/management/management.pb.go create mode 100644 protobuf/management/management.proto diff --git a/cmd/blast/cluster_delete.go b/cmd/blast/cluster_delete.go index 2cdff16..600c70f 100644 --- a/cmd/blast/cluster_delete.go +++ b/cmd/blast/cluster_delete.go @@ -19,7 +19,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -32,7 +32,7 @@ func clusterDelete(c *cli.Context) error { return err } - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_get.go b/cmd/blast/cluster_get.go index ee60520..fff65e3 100644 --- a/cmd/blast/cluster_get.go +++ b/cmd/blast/cluster_get.go @@ -19,7 +19,7 @@ import ( "fmt" "os" - 
"github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -28,7 +28,7 @@ func clusterGet(c *cli.Context) error { key := c.Args().Get(0) - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_node_health.go b/cmd/blast/cluster_node_health.go index 79bc6f8..39294b7 100644 --- a/cmd/blast/cluster_node_health.go +++ b/cmd/blast/cluster_node_health.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -27,7 +27,7 @@ func clusterNodeHealth(c *cli.Context) error { liveness := c.Bool("liveness") readiness := c.Bool("readiness") - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_node_info.go b/cmd/blast/cluster_node_info.go index ba93b13..517c72e 100644 --- a/cmd/blast/cluster_node_info.go +++ b/cmd/blast/cluster_node_info.go @@ -19,7 +19,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -38,7 +38,7 @@ func clusterNodeInfo(c *cli.Context) error { nodeId := c.Args().Get(0) - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_node_leave.go b/cmd/blast/cluster_node_leave.go index bfd151e..8250238 100644 --- a/cmd/blast/cluster_node_leave.go +++ b/cmd/blast/cluster_node_leave.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -36,7 +36,7 @@ func clusterNodeLeave(c *cli.Context) error { grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git 
a/cmd/blast/cluster_node_snapshot.go b/cmd/blast/cluster_node_snapshot.go index 09428cb..ce9d62f 100644 --- a/cmd/blast/cluster_node_snapshot.go +++ b/cmd/blast/cluster_node_snapshot.go @@ -18,14 +18,14 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) func clusterNodeSnapshot(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_peers_info.go b/cmd/blast/cluster_peers_info.go index 517a08d..6d4ea48 100644 --- a/cmd/blast/cluster_peers_info.go +++ b/cmd/blast/cluster_peers_info.go @@ -19,14 +19,14 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) func clusterPeersInfo(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_peers_watch.go b/cmd/blast/cluster_peers_watch.go index ebf9c8e..59d5d35 100644 --- a/cmd/blast/cluster_peers_watch.go +++ b/cmd/blast/cluster_peers_watch.go @@ -22,7 +22,7 @@ import ( "log" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/urfave/cli" ) @@ -30,7 +30,7 @@ import ( func clusterPeersWatch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } @@ -41,7 +41,7 @@ func clusterPeersWatch(c *cli.Context) error { } }() - err = indexerPeersInfo(c) + err = clusterPeersInfo(c) if err != nil { return err } diff --git a/cmd/blast/cluster_set.go b/cmd/blast/cluster_set.go index 4ac4328..55e51f0 100644 --- a/cmd/blast/cluster_set.go +++ b/cmd/blast/cluster_set.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - 
"github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -50,7 +50,7 @@ func clusterSet(c *cli.Context) error { } } - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/cluster_watch.go b/cmd/blast/cluster_watch.go index e1d2546..71d2dc2 100644 --- a/cmd/blast/cluster_watch.go +++ b/cmd/blast/cluster_watch.go @@ -22,7 +22,7 @@ import ( "log" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/urfave/cli" ) @@ -32,7 +32,7 @@ func clusterWatch(c *cli.Context) error { key := c.Args().Get(0) - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/distributor_delete.go b/cmd/blast/distributor_delete.go index 76e7a82..222af64 100644 --- a/cmd/blast/distributor_delete.go +++ b/cmd/blast/distributor_delete.go @@ -21,7 +21,7 @@ import ( "io" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) @@ -77,7 +77,7 @@ func distributorDelete(c *cli.Context) error { } // create client - client, err := grpc.NewClient(grpcAddr) + client, err := dispatcher.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/distributor_get.go b/cmd/blast/distributor_get.go index 5dbe684..e7f0cd1 100644 --- a/cmd/blast/distributor_get.go +++ b/cmd/blast/distributor_get.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) @@ -32,7 +32,7 @@ func distributorGet(c *cli.Context) error { return err } - client, err := grpc.NewClient(grpcAddr) + client, err := dispatcher.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/distributor_index.go b/cmd/blast/distributor_index.go index 795cb2f..ce78c90 100644 --- a/cmd/blast/distributor_index.go +++ 
b/cmd/blast/distributor_index.go @@ -22,7 +22,7 @@ import ( "io/ioutil" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/dispatcher" "github.com/mosuka/blast/indexutils" "github.com/urfave/cli" ) @@ -113,7 +113,7 @@ func distributorIndex(c *cli.Context) error { } // create gRPC client - client, err := grpc.NewClient(grpcAddr) + client, err := dispatcher.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/distributor_node_health.go b/cmd/blast/distributor_node_health.go index a25c357..cf0b51e 100644 --- a/cmd/blast/distributor_node_health.go +++ b/cmd/blast/distributor_node_health.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) @@ -27,7 +27,7 @@ func distributorNodeHealth(c *cli.Context) error { liveness := c.Bool("liveness") readiness := c.Bool("readiness") - client, err := grpc.NewClient(grpcAddr) + client, err := dispatcher.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/distributor_search.go b/cmd/blast/distributor_search.go index c50163a..b9494cc 100644 --- a/cmd/blast/distributor_search.go +++ b/cmd/blast/distributor_search.go @@ -21,7 +21,7 @@ import ( "os" "github.com/blevesearch/bleve" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) @@ -66,7 +66,7 @@ func distributorSearch(c *cli.Context) error { } } - client, err := grpc.NewClient(grpcAddr) + client, err := dispatcher.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_delete.go b/cmd/blast/indexer_delete.go index 680d7e5..7c1e1bd 100644 --- a/cmd/blast/indexer_delete.go +++ b/cmd/blast/indexer_delete.go @@ -21,7 +21,7 @@ import ( "io" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -77,7 +77,7 @@ func indexerDelete(c *cli.Context) error { } // create client - client, err := grpc.NewClient(grpcAddr) + client, err := 
indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go index 148b062..5a59a3e 100644 --- a/cmd/blast/indexer_get.go +++ b/cmd/blast/indexer_get.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -32,7 +32,7 @@ func indexerGet(c *cli.Context) error { return err } - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go index 0b73e71..9b53711 100644 --- a/cmd/blast/indexer_index.go +++ b/cmd/blast/indexer_index.go @@ -22,7 +22,7 @@ import ( "io/ioutil" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/urfave/cli" ) @@ -113,7 +113,7 @@ func indexerIndex(c *cli.Context) error { } // create gRPC client - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go index 478e3ce..beab1c0 100644 --- a/cmd/blast/indexer_node_health.go +++ b/cmd/blast/indexer_node_health.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -27,7 +27,7 @@ func indexerNodeHealth(c *cli.Context) error { liveness := c.Bool("liveness") readiness := c.Bool("readiness") - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go index 72c964e..ce35cd1 100644 --- a/cmd/blast/indexer_node_info.go +++ b/cmd/blast/indexer_node_info.go @@ -19,7 +19,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -38,7 +38,7 @@ func 
indexerNodeInfo(c *cli.Context) error { nodeId := c.Args().Get(0) - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_node_leave.go b/cmd/blast/indexer_node_leave.go index 7f150ea..5255586 100644 --- a/cmd/blast/indexer_node_leave.go +++ b/cmd/blast/indexer_node_leave.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -36,7 +36,7 @@ func indexerNodeLeave(c *cli.Context) error { grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_node_snapshot.go b/cmd/blast/indexer_node_snapshot.go index 68f3938..d59ab92 100644 --- a/cmd/blast/indexer_node_snapshot.go +++ b/cmd/blast/indexer_node_snapshot.go @@ -18,14 +18,14 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) func indexerNodeSnapshot(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_peers_info.go b/cmd/blast/indexer_peers_info.go index a438981..f798ea7 100644 --- a/cmd/blast/indexer_peers_info.go +++ b/cmd/blast/indexer_peers_info.go @@ -19,14 +19,14 @@ import ( "fmt" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) func indexerPeersInfo(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_peers_watch.go b/cmd/blast/indexer_peers_watch.go index 1fcd637..1169a9d 100644 --- a/cmd/blast/indexer_peers_watch.go +++ 
b/cmd/blast/indexer_peers_watch.go @@ -22,7 +22,7 @@ import ( "log" "os" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/protobuf" "github.com/urfave/cli" ) @@ -30,7 +30,7 @@ import ( func indexerPeersWatch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/cmd/blast/indexer_search.go b/cmd/blast/indexer_search.go index c73d821..5c250dc 100644 --- a/cmd/blast/indexer_search.go +++ b/cmd/blast/indexer_search.go @@ -21,7 +21,7 @@ import ( "os" "github.com/blevesearch/bleve" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -66,7 +66,7 @@ func indexerSearch(c *cli.Context) error { } } - client, err := grpc.NewClient(grpcAddr) + client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err } diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go new file mode 100644 index 0000000..a042b07 --- /dev/null +++ b/dispatcher/grpc_client.go @@ -0,0 +1,241 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dispatcher + +import ( + "context" + "errors" + "math" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" + blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type GRPCClient struct { + ctx context.Context + cancel context.CancelFunc + conn *grpc.ClientConn + client distribute.DistributeClient +} + +func NewGRPCContext() (context.Context, context.CancelFunc) { + baseCtx := context.TODO() + //return context.WithTimeout(baseCtx, 60*time.Second) + return context.WithCancel(baseCtx) +} + +func NewGRPCClient(address string) (*GRPCClient, error) { + ctx, cancel := NewGRPCContext() + + //streamRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.Disable(), + //} + + //unaryRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), + // grpc_retry.WithCodes(codes.Unavailable), + // grpc_retry.WithMax(100), + //} + + dialOpts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt32), + grpc.MaxCallRecvMsgSize(math.MaxInt32), + ), + //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), + //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), + } + + conn, err := grpc.DialContext(ctx, address, dialOpts...) 
+ if err != nil { + return nil, err + } + + return &GRPCClient{ + ctx: ctx, + cancel: cancel, + conn: conn, + client: distribute.NewDistributeClient(conn), + }, nil +} + +func (c *GRPCClient) Cancel() { + c.cancel() +} + +func (c *GRPCClient) Close() error { + c.Cancel() + if c.conn != nil { + return c.conn.Close() + } + + return c.ctx.Err() +} + +func (c *GRPCClient) GetAddress() string { + return c.conn.Target() +} + +func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return distribute.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return distribute.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { + req := &distribute.GetDocumentRequest{ + Id: id, + } + + resp, err := c.client.GetDocument(c.ctx, req, opts...) 
+ if err != nil { + st, _ := status.FromError(err) + + switch st.Code() { + case codes.NotFound: + return nil, blasterrors.ErrNotFound + default: + return nil, errors.New(st.Message()) + } + } + + ins, err := protobuf.MarshalAny(resp.Fields) + fields := *ins.(*map[string]interface{}) + + return fields, nil +} + +func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { + // bleve.SearchRequest -> Any + searchRequestAny := &any.Any{} + err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + return nil, err + } + + req := &distribute.SearchRequest{ + SearchRequest: searchRequestAny, + } + + resp, err := c.client.Search(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + + // Any -> bleve.SearchResult + searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + if searchResultInstance == nil { + return nil, errors.New("nil") + } + searchResult := searchResultInstance.(*bleve.SearchResult) + + return searchResult, nil +} + +func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { + stream, err := c.client.IndexDocument(c.ctx, opts...) 
+ if err != nil { + st, _ := status.FromError(err) + + return -1, errors.New(st.Message()) + } + + for _, doc := range docs { + id := doc.Id + fields := doc.Fields + + fieldsAny := &any.Any{} + err := protobuf.UnmarshalAny(&fields, fieldsAny) + if err != nil { + return -1, err + } + + req := &distribute.IndexDocumentRequest{ + Id: id, + Fields: fieldsAny, + } + + err = stream.Send(req) + if err != nil { + return -1, err + } + } + + resp, err := stream.CloseAndRecv() + if err != nil { + return -1, err + } + + return int(resp.Count), nil +} + +func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { + stream, err := c.client.DeleteDocument(c.ctx, opts...) + if err != nil { + st, _ := status.FromError(err) + + return -1, errors.New(st.Message()) + } + + for _, id := range ids { + req := &distribute.DeleteDocumentRequest{ + Id: id, + } + + err := stream.Send(req) + if err != nil { + return -1, err + } + } + + resp, err := stream.CloseAndRecv() + if err != nil { + return -1, err + } + + return int(resp.Count), nil +} diff --git a/dispatcher/grpc_server.go b/dispatcher/grpc_server.go new file mode 100644 index 0000000..7bc684e --- /dev/null +++ b/dispatcher/grpc_server.go @@ -0,0 +1,94 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dispatcher + +import ( + "net" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/mosuka/blast/protobuf/distribute" + "go.uber.org/zap" + "google.golang.org/grpc" + //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" + //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" +) + +type GRPCServer struct { + service distribute.DistributeServer + server *grpc.Server + listener net.Listener + + logger *zap.Logger +} + +func NewGRPCServer(grpcAddr string, service distribute.DistributeServer, logger *zap.Logger) (*GRPCServer, error) { + server := grpc.NewServer( + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + //grpc_ctxtags.StreamServerInterceptor(), + //grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_zap.StreamServerInterceptor(logger), + //grpc_auth.StreamServerInterceptor(myAuthFunction), + //grpc_recovery.StreamServerInterceptor(), + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + //grpc_ctxtags.UnaryServerInterceptor(), + //grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_zap.UnaryServerInterceptor(logger), + //grpc_auth.UnaryServerInterceptor(myAuthFunction), + //grpc_recovery.UnaryServerInterceptor(), + )), + ) + + distribute.RegisterDistributeServer(server, service) + + grpc_prometheus.EnableHandlingTimeHistogram() + grpc_prometheus.Register(server) + + listener, err := net.Listen("tcp", grpcAddr) + if err != nil { + return nil, err + } + + return &GRPCServer{ + service: service, + server: server, + listener: listener, + logger: logger, + }, nil +} + +func (s *GRPCServer) Start() error { + 
s.logger.Info("start server") + err := s.server.Serve(s.listener) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCServer) Stop() error { + s.logger.Info("stop server") + s.server.Stop() + //s.server.GracefulStop() + + return nil +} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 770bbd9..6f85872 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -25,14 +25,16 @@ import ( "sync" "time" - "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/raft" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" "github.com/mosuka/blast/sortutils" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -40,18 +42,16 @@ import ( ) type GRPCService struct { - *grpc.Service - managerAddr string logger *zap.Logger managers map[string]interface{} - managerClients map[string]*grpc.Client + managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} indexers map[string]interface{} - indexerClients map[string]map[string]*grpc.Client + indexerClients map[string]map[string]*indexer.GRPCClient updateIndexersStopCh chan struct{} updateIndexersDoneCh chan struct{} } @@ -62,10 +62,10 @@ func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error logger: logger, managers: make(map[string]interface{}, 0), - managerClients: make(map[string]*grpc.Client, 0), + managerClients: make(map[string]*manager.GRPCClient, 0), indexers: make(map[string]interface{}, 0), - indexerClients: make(map[string]map[string]*grpc.Client, 0), + indexerClients: make(map[string]map[string]*indexer.GRPCClient, 0), }, nil } @@ -89,8 +89,8 @@ func (s 
*GRPCService) Stop() error { return nil } -func (s *GRPCService) getManagerClient() (*grpc.Client, error) { - var client *grpc.Client +func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { + var client *manager.GRPCClient for id, node := range s.managers { nm, ok := node.(map[string]interface{}) @@ -124,7 +124,7 @@ func (s *GRPCService) getManagerClient() (*grpc.Client, error) { } func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { - client, err := grpc.NewClient(s.managerAddr) + client, err := manager.NewGRPCClient(s.managerAddr) defer func() { err := client.Close() if err != nil { @@ -185,7 +185,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -266,7 +266,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -281,7 +281,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -403,12 +403,12 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { 
} s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(metadata["grpc_addr"].(string)) + client, err := indexer.NewGRPCClient(metadata["grpc_addr"].(string)) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*grpc.Client) + s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) } s.indexerClients[clusterId][nodeId] = client } @@ -504,7 +504,7 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { s.logger.Error(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := indexer.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -518,12 +518,12 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := grpc.NewClient(nodeConfig["grpc_addr"].(string)) + newClient, err := indexer.NewGRPCClient(nodeConfig["grpc_addr"].(string)) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*grpc.Client) + s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) } s.indexerClients[clusterId][nodeId] = newClient } @@ -557,8 +557,8 @@ func (s *GRPCService) stopUpdateIndexers() { s.logger.Info("the indexer cluster update has been stopped") } -func (s *GRPCService) getIndexerClients() map[string]*grpc.Client { - indexerClients := make(map[string]*grpc.Client, 0) +func (s *GRPCService) 
getIndexerClients() map[string]*indexer.GRPCClient { + indexerClients := make(map[string]*indexer.GRPCClient, 0) for clusterId, cluster := range s.indexerClients { nodeIds := make([]string, 0) @@ -575,7 +575,23 @@ func (s *GRPCService) getIndexerClients() map[string]*grpc.Client { return indexerClients } -func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { +func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*distribute.LivenessProbeResponse, error) { + resp := &distribute.LivenessProbeResponse{ + State: distribute.LivenessProbeResponse_ALIVE, + } + + return resp, nil +} + +func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*distribute.ReadinessProbeResponse, error) { + resp := &distribute.ReadinessProbeResponse{ + State: distribute.ReadinessProbeResponse_READY, + } + + return resp, nil +} + +func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocumentRequest) (*distribute.GetDocumentResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -597,7 +613,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument wg := &sync.WaitGroup{} for clusterId, client := range indexerClients { wg.Add(1) - go func(clusterId string, client *grpc.Client, id string, respChan chan respVal) { + go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { // index documents fields, err := client.GetDocument(id) wg.Done() @@ -624,7 +640,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument } } - resp := &protobuf.GetDocumentResponse{} + resp := &distribute.GetDocumentResponse{} fieldsAny := &any.Any{} err := protobuf.UnmarshalAny(fields, fieldsAny) @@ -639,10 +655,10 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument return resp, nil } -func (s *GRPCService) Search(ctx 
context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { +func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { start := time.Now() - resp := &protobuf.SearchResponse{} + resp := &distribute.SearchResponse{} indexerClients := s.getIndexerClients() @@ -679,7 +695,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( wg := &sync.WaitGroup{} for clusterId, client := range indexerClients { wg.Add(1) - go func(clusterId string, client *grpc.Client, searchRequest *bleve.SearchRequest, respChan chan respVal) { + go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { searchResult, err := client.Search(searchRequest) wg.Done() respChan <- respVal{ @@ -773,7 +789,7 @@ func (s *GRPCService) docIdHash(docId string) uint64 { return hash.Sum64() } -func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { +func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentServer) error { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -861,14 +877,14 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e } // response - resp := &protobuf.IndexDocumentResponse{ + resp := &distribute.IndexDocumentResponse{ Count: int32(totalCount), } return stream.SendAndClose(resp) } -func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { +func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocumentServer) error { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -906,7 +922,7 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) wg := &sync.WaitGroup{} for clusterId, client := range indexerClients { wg.Add(1) - go func(clusterId string, client *grpc.Client, ids []string, respChan chan respVal) { + go 
func(clusterId string, client *indexer.GRPCClient, ids []string, respChan chan respVal) { // index documents count, err := client.DeleteDocument(ids) wg.Done() @@ -931,7 +947,7 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) } // response - resp := &protobuf.DeleteDocumentResponse{ + resp := &distribute.DeleteDocumentResponse{ Count: int32(totalCount), } diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index 08b0484..e44c681 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -23,24 +23,34 @@ import ( "strings" "time" - "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve" "github.com/gorilla/mux" "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" blasthttp "github.com/mosuka/blast/http" + "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) +type Router struct { + mux.Router + + GRPCClient *GRPCClient + logger *zap.Logger +} + +func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { + grpcClient, err := NewGRPCClient(grpcAddr) if err != nil { return nil, err } + router := &Router{ + GRPCClient: grpcClient, + logger: logger, + } + router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") @@ -55,6 +65,17 @@ func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { return router, nil } +func (r *Router) Close() error { + r.GRPCClient.Cancel() + + err := r.GRPCClient.Close() + if err != nil { + return err + } + + return nil +} + type RootHandler struct { logger *zap.Logger } @@ -86,11 +107,11 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type GetHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func 
NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { +func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, @@ -152,11 +173,11 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type IndexHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewSetDocumentHandler(client *grpc.Client, logger *zap.Logger) *IndexHandler { +func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { return &IndexHandler{ client: client, logger: logger, @@ -376,11 +397,11 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type DeleteHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { +func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, @@ -498,11 +519,11 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type SearchHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { +func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { return &SearchHandler{ client: client, logger: logger, diff --git a/dispatcher/http_server.go b/dispatcher/http_server.go new file mode 100644 index 0000000..5d3fbda --- /dev/null +++ b/dispatcher/http_server.go @@ -0,0 +1,69 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dispatcher + +import ( + "net" + "net/http" + + accesslog "github.com/mash/go-accesslog" + "go.uber.org/zap" +) + +type HTTPServer struct { + listener net.Listener + router *Router + + logger *zap.Logger + httpLogger accesslog.Logger +} + +func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { + listener, err := net.Listen("tcp", httpAddr) + if err != nil { + return nil, err + } + + return &HTTPServer{ + listener: listener, + router: router, + logger: logger, + httpLogger: httpLogger, + }, nil +} + +func (s *HTTPServer) Start() error { + err := http.Serve( + s.listener, + accesslog.NewLoggingHandler( + s.router, + s.httpLogger, + ), + ) + if err != nil { + return err + } + + return nil +} + +func (s *HTTPServer) Stop() error { + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} diff --git a/dispatcher/server.go b/dispatcher/server.go index b25debb..d088d51 100644 --- a/dispatcher/server.go +++ b/dispatcher/server.go @@ -17,8 +17,6 @@ package dispatcher import ( accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" "go.uber.org/zap" ) @@ -30,9 +28,9 @@ type Server struct { httpLogger accesslog.Logger grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server + grpcServer *GRPCServer + httpRouter *Router + httpServer *HTTPServer } func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, 
logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { @@ -56,7 +54,7 @@ func (s *Server) Start() { } // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -70,7 +68,7 @@ func (s *Server) Start() { } // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index bbe660d..a6f7d86 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -23,7 +23,6 @@ import ( "github.com/hashicorp/raft" "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" @@ -120,7 +119,7 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - managerClient1, err := grpc.NewClient(managerNodeConfig1.GRPCAddr) + managerClient1, err := manager.NewGRPCClient(managerNodeConfig1.GRPCAddr) defer func() { _ = managerClient1.Close() }() @@ -240,7 +239,7 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient1, err := grpc.NewClient(indexerNodeConfig1.GRPCAddr) + indexerClient1, err := indexer.NewGRPCClient(indexerNodeConfig1.GRPCAddr) defer func() { _ = indexerClient1.Close() }() @@ -360,7 +359,7 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient2, err := grpc.NewClient(indexerNodeConfig4.GRPCAddr) + indexerClient2, err := indexer.NewGRPCClient(indexerNodeConfig4.GRPCAddr) defer func() { _ = indexerClient1.Close() 
}() diff --git a/grpc/service.go b/grpc/service.go deleted file mode 100644 index 5882e52..0000000 --- a/grpc/service.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Service struct{} - -func (s *Service) Start() error { - return nil -} - -func (s *Service) Stop() error { - return nil -} - -func (s *Service) LivenessProbe(ctx context.Context, req *empty.Empty) (*protobuf.LivenessProbeResponse, error) { - resp := &protobuf.LivenessProbeResponse{ - State: protobuf.LivenessProbeResponse_ALIVE, - } - - return resp, nil -} - -func (s *Service) ReadinessProbe(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessProbeResponse, error) { - resp := &protobuf.ReadinessProbeResponse{ - State: protobuf.ReadinessProbeResponse_READY, - } - - return resp, nil -} - -func (s *Service) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - return &protobuf.GetNodeResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) DeleteNode(ctx context.Context, req 
*protobuf.DeleteNodeRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - return &protobuf.GetClusterResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetValue(ctx context.Context, req *protobuf.GetValueRequest) (*protobuf.GetValueResponse, error) { - return &protobuf.GetValueResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) SetValue(ctx context.Context, req *protobuf.SetValueRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) DeleteValue(ctx context.Context, req *protobuf.DeleteValueRequest) (*empty.Empty, error) { - return &empty.Empty{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) WatchStore(req *protobuf.WatchStoreRequest, server protobuf.Blast_WatchStoreServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - return &protobuf.GetDocumentResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - return &protobuf.SearchResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { - return status.Error(codes.Unavailable, "not implement") -} - 
-func (s *Service) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { - return status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetIndexConfig(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexConfigResponse, error) { - return &protobuf.GetIndexConfigResponse{}, status.Error(codes.Unavailable, "not implement") -} - -func (s *Service) GetIndexStats(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexStatsResponse, error) { - return &protobuf.GetIndexStatsResponse{}, status.Error(codes.Unavailable, "not implement") -} diff --git a/grpc/client.go b/indexer/grpc_client.go similarity index 64% rename from grpc/client.go rename to indexer/grpc_client.go index 2d87208..e5cdbf6 100644 --- a/grpc/client.go +++ b/indexer/grpc_client.go @@ -12,40 +12,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -package grpc +package indexer import ( "context" "errors" "math" - "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -type Client struct { +type GRPCClient struct { ctx context.Context cancel context.CancelFunc conn *grpc.ClientConn - client protobuf.BlastClient + client index.IndexClient } -func NewContext() (context.Context, context.CancelFunc) { +func NewGRPCContext() (context.Context, context.CancelFunc) { baseCtx := context.TODO() //return context.WithTimeout(baseCtx, 60*time.Second) return context.WithCancel(baseCtx) } -func NewClient(address string) (*Client, error) { - ctx, cancel := NewContext() +func NewGRPCClient(address string) (*GRPCClient, error) { + ctx, cancel := NewGRPCContext() //streamRetryOpts := 
[]grpc_retry.CallOption{ // grpc_retry.Disable(), @@ -72,19 +72,19 @@ func NewClient(address string) (*Client, error) { return nil, err } - return &Client{ + return &GRPCClient{ ctx: ctx, cancel: cancel, conn: conn, - client: protobuf.NewBlastClient(conn), + client: index.NewIndexClient(conn), }, nil } -func (c *Client) Cancel() { +func (c *GRPCClient) Cancel() { c.cancel() } -func (c *Client) Close() error { +func (c *GRPCClient) Close() error { c.Cancel() if c.conn != nil { return c.conn.Close() @@ -93,12 +93,34 @@ func (c *Client) Close() error { return c.ctx.Err() } -func (c *Client) GetAddress() string { +func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *Client) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &protobuf.GetNodeRequest{ +func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return index.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return index.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { + req := &index.GetNodeRequest{ Id: id, } @@ -120,14 +142,14 @@ func (c *Client) GetNode(id string, opts ...grpc.CallOption) (map[string]interfa return node, nil } -func (c *Client) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { +func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { nodeConfigAny := &any.Any{} err := 
protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) if err != nil { return err } - req := &protobuf.SetNodeRequest{ + req := &index.SetNodeRequest{ Id: id, NodeConfig: nodeConfigAny, } @@ -140,8 +162,8 @@ func (c *Client) SetNode(id string, nodeConfig map[string]interface{}, opts ...g return nil } -func (c *Client) DeleteNode(id string, opts ...grpc.CallOption) error { - req := &protobuf.DeleteNodeRequest{ +func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { + req := &index.DeleteNodeRequest{ Id: id, } @@ -153,7 +175,7 @@ func (c *Client) DeleteNode(id string, opts ...grpc.CallOption) error { return nil } -func (c *Client) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) @@ -167,7 +189,7 @@ func (c *Client) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, er return cluster, nil } -func (c *Client) WatchCluster(opts ...grpc.CallOption) (protobuf.Blast_WatchClusterClient, error) { +func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (index.Index_WatchClusterClient, error) { req := &empty.Empty{} watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) 
@@ -179,124 +201,8 @@ func (c *Client) WatchCluster(opts ...grpc.CallOption) (protobuf.Blast_WatchClus return watchClient, nil } -func (c *Client) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return errors.New(st.Message()) - } - - return nil -} - -func (c *Client) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return protobuf.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil -} - -func (c *Client) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return protobuf.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil -} - -func (c *Client) GetValue(key string, opts ...grpc.CallOption) (interface{}, error) { - req := &protobuf.GetValueRequest{ - Key: key, - } - - resp, err := c.client.GetValue(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - value, err := protobuf.MarshalAny(resp.Value) - - return value, nil -} - -func (c *Client) SetValue(key string, value interface{}, opts ...grpc.CallOption) error { - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &protobuf.SetValueRequest{ - Key: key, - Value: valueAny, - } - - _, err = c.client.SetValue(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil -} - -func (c *Client) DeleteValue(key string, opts ...grpc.CallOption) error { - req := &protobuf.DeleteValueRequest{ - Key: key, - } - - _, err := c.client.DeleteValue(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil -} - -func (c *Client) WatchStore(key string, opts ...grpc.CallOption) (protobuf.Blast_WatchStoreClient, error) { - req := &protobuf.WatchStoreRequest{ - Key: key, - } - - watchClient, err := c.client.WatchStore(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil -} - -func (c *Client) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &protobuf.GetDocumentRequest{ +func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { + req := &index.GetDocumentRequest{ Id: id, } @@ -318,7 +224,7 @@ func (c *Client) GetDocument(id string, opts ...grpc.CallOption) (map[string]int return fields, nil } -func (c *Client) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { +func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { // bleve.SearchRequest -> Any searchRequestAny := &any.Any{} err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) @@ -326,7 +232,7 @@ func (c *Client) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOpt return nil, err } - req := &protobuf.SearchRequest{ + req := &index.SearchRequest{ SearchRequest: searchRequestAny, } @@ -352,7 +258,7 @@ func (c *Client) Search(searchRequest 
*bleve.SearchRequest, opts ...grpc.CallOpt return searchResult, nil } -func (c *Client) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { +func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { stream, err := c.client.IndexDocument(c.ctx, opts...) if err != nil { st, _ := status.FromError(err) @@ -370,7 +276,7 @@ func (c *Client) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOpt return -1, err } - req := &protobuf.IndexDocumentRequest{ + req := &index.IndexDocumentRequest{ Id: id, Fields: fieldsAny, } @@ -389,7 +295,7 @@ func (c *Client) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOpt return int(resp.Count), nil } -func (c *Client) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { +func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { stream, err := c.client.DeleteDocument(c.ctx, opts...) if err != nil { st, _ := status.FromError(err) @@ -398,7 +304,7 @@ func (c *Client) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, err } for _, id := range ids { - req := &protobuf.DeleteDocumentRequest{ + req := &index.DeleteDocumentRequest{ Id: id, } @@ -416,7 +322,7 @@ func (c *Client) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, err return int(resp.Count), nil } -func (c *Client) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) 
if err != nil { st, _ := status.FromError(err) @@ -435,7 +341,7 @@ func (c *Client) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{} return indexConfig, nil } -func (c *Client) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) @@ -453,3 +359,14 @@ func (c *Client) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, return indexStats, nil } + +func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { + _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return errors.New(st.Message()) + } + + return nil +} diff --git a/grpc/server.go b/indexer/grpc_server.go similarity index 87% rename from grpc/server.go rename to indexer/grpc_server.go index 9360656..8dd8c78 100644 --- a/grpc/server.go +++ b/indexer/grpc_server.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package grpc +package indexer import ( "net" @@ -20,7 +20,7 @@ import ( grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" "google.golang.org/grpc" //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" @@ -29,15 +29,15 @@ import ( //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" ) -type Server struct { - service protobuf.BlastServer +type GRPCServer struct { + service index.IndexServer server *grpc.Server listener net.Listener logger *zap.Logger } -func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger) (*Server, error) { +func NewGRPCServer(grpcAddr string, service index.IndexServer, logger *zap.Logger) (*GRPCServer, error) { server := grpc.NewServer( grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( //grpc_ctxtags.StreamServerInterceptor(), @@ -57,7 +57,7 @@ func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger )), ) - protobuf.RegisterBlastServer(server, service) + index.RegisterIndexServer(server, service) grpc_prometheus.EnableHandlingTimeHistogram() grpc_prometheus.Register(server) @@ -67,7 +67,7 @@ func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger return nil, err } - return &Server{ + return &GRPCServer{ service: service, server: server, listener: listener, @@ -75,7 +75,7 @@ func NewServer(grpcAddr string, service protobuf.BlastServer, logger *zap.Logger }, nil } -func (s *Server) Start() error { +func (s *GRPCServer) Start() error { s.logger.Info("start server") err := s.server.Serve(s.listener) if err != nil { @@ -85,7 +85,7 @@ func (s *Server) Start() error { return nil } -func (s *Server) Stop() error { +func (s *GRPCServer) Stop() error { s.logger.Info("stop server") 
s.server.Stop() //s.server.GracefulStop() diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index f9a6772..0b56280 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -23,23 +23,23 @@ import ( "sync" "time" - "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/raft" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" + "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type GRPCService struct { - *grpc.Service + //*grpc.Service clusterConfig *config.ClusterConfig raftServer *RaftServer @@ -48,13 +48,13 @@ type GRPCService struct { updateClusterStopCh chan struct{} updateClusterDoneCh chan struct{} peers map[string]interface{} - peerClients map[string]*grpc.Client + peerClients map[string]*GRPCClient cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} + clusterChans map[chan index.GetClusterResponse]struct{} clusterMutex sync.RWMutex managers map[string]interface{} - managerClients map[string]*grpc.Client + managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} } @@ -66,12 +66,12 @@ func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, logger: logger, peers: make(map[string]interface{}, 0), - peerClients: make(map[string]*grpc.Client, 0), + peerClients: make(map[string]*GRPCClient, 0), cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan protobuf.GetClusterResponse]struct{}), + clusterChans: make(map[chan index.GetClusterResponse]struct{}), managers: make(map[string]interface{}, 0), - managerClients: make(map[string]*grpc.Client, 0), 
+ managerClients: make(map[string]*manager.GRPCClient, 0), }, nil } @@ -99,8 +99,8 @@ func (s *GRPCService) Stop() error { return nil } -func (s *GRPCService) getManagerClient() (*grpc.Client, error) { - var client *grpc.Client +func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { + var client *manager.GRPCClient for id, node := range s.managers { nm, ok := node.(map[string]interface{}) @@ -134,7 +134,7 @@ func (s *GRPCService) getManagerClient() (*grpc.Client, error) { } func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { - client, err := grpc.NewClient(managerAddr) + client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() if err != nil { @@ -195,7 +195,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := grpc.NewClient(grpcAddr) + client, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -275,7 +275,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -290,7 +290,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := manager.NewGRPCClient(grpcAddr) if err != nil { s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -344,8 
+344,8 @@ func (s *GRPCService) stopUpdateManagers() { s.logger.Info("the manager cluster update has been stopped") } -func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { - var client *grpc.Client +func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { + var client *GRPCClient for id, node := range s.cluster { state, ok := node.(map[string]interface{})["state"].(string) @@ -435,7 +435,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := NewGRPCClient(grpcAddr) if err != nil { s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -450,7 +450,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := grpc.NewClient(grpcAddr) + peerClient, err := NewGRPCClient(grpcAddr) if err != nil { s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -485,7 +485,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // notify current cluster if !reflect.DeepEqual(s.cluster, cluster) { // convert to GetClusterResponse for channel output - clusterResp := &protobuf.GetClusterResponse{} + clusterResp := &index.GetClusterResponse{} clusterAny := &any.Any{} err = protobuf.UnmarshalAny(cluster, clusterAny) if err != nil { @@ -540,6 +540,22 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("the cluster update has been stopped") } +func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*index.LivenessProbeResponse, error) { + resp := &index.LivenessProbeResponse{ + State: index.LivenessProbeResponse_ALIVE, + } + + return resp, nil +} + +func (s *GRPCService) 
ReadinessProbe(ctx context.Context, req *empty.Empty) (*index.ReadinessProbeResponse, error) { + resp := &index.ReadinessProbeResponse{ + State: index.ReadinessProbeResponse_READY, + } + + return resp, nil +} + func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } @@ -593,8 +609,8 @@ func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { return nodeInfo, nil } -func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - resp := &protobuf.GetNodeResponse{} +func (s *GRPCService) GetNode(ctx context.Context, req *index.GetNodeRequest) (*index.GetNodeResponse, error) { + resp := &index.GetNodeResponse{} nodeInfo, err := s.getNode(req.Id) if err != nil { @@ -649,7 +665,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro return nil } -func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) SetNode(ctx context.Context, req *index.SetNodeRequest) (*empty.Empty, error) { resp := &empty.Empty{} ins, err := protobuf.MarshalAny(req.NodeConfig) @@ -693,7 +709,7 @@ func (s *GRPCService) deleteNode(id string) error { return nil } -func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) DeleteNode(ctx context.Context, req *index.DeleteNodeRequest) (*empty.Empty, error) { resp := &empty.Empty{} err := s.deleteNode(req.Id) @@ -730,8 +746,8 @@ func (s *GRPCService) getCluster() (map[string]interface{}, error) { return cluster, nil } -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - resp := &protobuf.GetClusterResponse{} +func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*index.GetClusterResponse, error) { + resp := &index.GetClusterResponse{} cluster, err := s.getCluster() if err != nil { @@ -751,8 +767,8 @@ func (s 
*GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob return resp, nil } -func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - chans := make(chan protobuf.GetClusterResponse) +func (s *GRPCService) WatchCluster(req *empty.Empty, server index.Index_WatchClusterServer) error { + chans := make(chan index.GetClusterResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} @@ -776,20 +792,8 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch return nil } -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocumentRequest) (*protobuf.GetDocumentResponse, error) { - resp := &protobuf.GetDocumentResponse{} +func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentRequest) (*index.GetDocumentResponse, error) { + resp := &index.GetDocumentResponse{} fields, err := s.raftServer.GetDocument(req.Id) if err != nil { @@ -814,8 +818,8 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *protobuf.GetDocument return resp, nil } -func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { - resp := &protobuf.SearchResponse{} +func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { + resp := &index.SearchResponse{} searchRequest, err := protobuf.MarshalAny(req.SearchRequest) if err != nil { @@ -841,7 +845,7 @@ func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) ( return resp, nil } -func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) error { +func (s *GRPCService) 
IndexDocument(stream index.Index_IndexDocumentServer) error { docs := make([]*indexutils.Document, 0) for { @@ -897,13 +901,13 @@ func (s *GRPCService) IndexDocument(stream protobuf.Blast_IndexDocumentServer) e } return stream.SendAndClose( - &protobuf.IndexDocumentResponse{ + &index.IndexDocumentResponse{ Count: int32(count), }, ) } -func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) error { +func (s *GRPCService) DeleteDocument(stream index.Index_DeleteDocumentServer) error { ids := make([]string, 0) for { @@ -944,14 +948,14 @@ func (s *GRPCService) DeleteDocument(stream protobuf.Blast_DeleteDocumentServer) } return stream.SendAndClose( - &protobuf.DeleteDocumentResponse{ + &index.DeleteDocumentResponse{ Count: int32(count), }, ) } -func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexConfigResponse, error) { - resp := &protobuf.GetIndexConfigResponse{} +func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { + resp := &index.GetIndexConfigResponse{} indexConfig, err := s.raftServer.GetIndexConfig() if err != nil { @@ -971,8 +975,8 @@ func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*pr return resp, nil } -func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*protobuf.GetIndexStatsResponse, error) { - resp := &protobuf.GetIndexStatsResponse{} +func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*index.GetIndexStatsResponse, error) { + resp := &index.GetIndexStatsResponse{} indexStats, err := s.raftServer.GetIndexStats() if err != nil { @@ -991,3 +995,15 @@ func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*pro return resp, nil } + +func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + resp := &empty.Empty{} + + err := s.raftServer.Snapshot() + if err != nil { + s.logger.Error(err.Error()) + 
return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 3ad4dea..697cc2e 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -26,7 +26,6 @@ import ( "github.com/blevesearch/bleve" "github.com/gorilla/mux" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" blasthttp "github.com/mosuka/blast/http" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/version" @@ -34,12 +33,24 @@ import ( "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) +type Router struct { + mux.Router + + GRPCClient *GRPCClient + logger *zap.Logger +} + +func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { + grpcClient, err := NewGRPCClient(grpcAddr) if err != nil { return nil, err } + router := &Router{ + GRPCClient: grpcClient, + logger: logger, + } + router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") @@ -54,6 +65,17 @@ func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { return router, nil } +func (r *Router) Close() error { + r.GRPCClient.Cancel() + + err := r.GRPCClient.Close() + if err != nil { + return err + } + + return nil +} + type RootHandler struct { logger *zap.Logger } @@ -85,11 +107,11 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type GetHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewGetDocumentHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { +func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, @@ -153,11 +175,11 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type IndexHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func 
NewSetDocumentHandler(client *grpc.Client, logger *zap.Logger) *IndexHandler { +func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { return &IndexHandler{ client: client, logger: logger, @@ -377,11 +399,11 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type DeleteHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewDeleteDocumentHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { +func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, @@ -501,11 +523,11 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type SearchHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewSearchHandler(client *grpc.Client, logger *zap.Logger) *SearchHandler { +func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { return &SearchHandler{ client: client, logger: logger, diff --git a/http/server.go b/indexer/http_server.go similarity index 82% rename from http/server.go rename to indexer/http_server.go index 8cdb7cf..238da55 100644 --- a/http/server.go +++ b/indexer/http_server.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package http +package indexer import ( "net" @@ -22,7 +22,7 @@ import ( "go.uber.org/zap" ) -type Server struct { +type HTTPServer struct { listener net.Listener router *Router @@ -30,13 +30,13 @@ type Server struct { httpLogger accesslog.Logger } -func NewServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { listener, err := net.Listen("tcp", httpAddr) if err != nil { return nil, err } - return &Server{ + return &HTTPServer{ listener: listener, router: router, logger: logger, @@ -44,7 +44,7 @@ func NewServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger a }, nil } -func (s *Server) Start() error { +func (s *HTTPServer) Start() error { err := http.Serve( s.listener, accesslog.NewLoggingHandler( @@ -59,7 +59,7 @@ func (s *Server) Start() error { return nil } -func (s *Server) Stop() error { +func (s *HTTPServer) Stop() error { err := s.listener.Close() if err != nil { return err diff --git a/indexer/index.go b/indexer/index.go index 82e2ba3..f208a17 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -19,6 +19,8 @@ import ( "os" "time" + "github.com/mosuka/blast/protobuf/index" + "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/document" "github.com/golang/protobuf/ptypes/any" @@ -216,8 +218,8 @@ func (i *Index) Stats() (map[string]interface{}, error) { return i.index.StatsMap(), nil } -func (i *Index) SnapshotItems() <-chan *protobuf.Document { - ch := make(chan *protobuf.Document, 1024) +func (i *Index) SnapshotItems() <-chan *index.Document { + ch := make(chan *index.Document, 1024) go func() { idx, _, err := i.index.Advanced() @@ -268,7 +270,7 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { break } - doc := &protobuf.Document{ + doc := &index.Document{ Id: string(id), Fields: fieldsAny, } diff --git a/indexer/raft_fsm.go 
b/indexer/raft_fsm.go index 381cdc5..5be21e4 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -21,6 +21,8 @@ import ( "io/ioutil" "sync" + "github.com/mosuka/blast/protobuf/index" + "github.com/blevesearch/bleve" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" @@ -278,7 +280,7 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { buff := proto.NewBuffer(data) for { - doc := &protobuf.Document{} + doc := &index.Document{} err = buff.DecodeMessage(doc) if err == io.ErrUnexpectedEOF { break diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 6e1f511..5d8d1c8 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -23,8 +23,6 @@ import ( "path/filepath" "time" - "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" @@ -32,6 +30,7 @@ import ( _ "github.com/mosuka/blast/builtins" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/indexutils" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" ) diff --git a/indexer/server.go b/indexer/server.go index d56de34..b60d444 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -20,8 +20,7 @@ import ( accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" + "github.com/mosuka/blast/manager" "go.uber.org/zap" ) @@ -35,9 +34,9 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server + grpcServer *GRPCServer + httpRouter *Router + httpServer *HTTPServer } func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { @@ -56,7 +55,7 @@ func (s *Server) Start() { if 
s.clusterConfig.ManagerAddr != "" { s.logger.Info("connect to manager", zap.String("manager_addr", s.clusterConfig.ManagerAddr)) - mc, err := grpc.NewClient(s.clusterConfig.ManagerAddr) + mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) defer func() { s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -108,7 +107,7 @@ func (s *Server) Start() { //get index config from manager or peer if s.clusterConfig.ManagerAddr != "" { - mc, err := grpc.NewClient(s.clusterConfig.ManagerAddr) + mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) defer func() { s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -133,7 +132,7 @@ func (s *Server) Start() { s.indexConfig = config.NewIndexConfigFromMap(*value.(*map[string]interface{})) } } else if s.clusterConfig.PeerAddr != "" { - pc, err := grpc.NewClient(s.clusterConfig.PeerAddr) + pc, err := NewGRPCClient(s.clusterConfig.PeerAddr) defer func() { s.logger.Debug("close client", zap.String("address", pc.GetAddress())) err = pc.Close() @@ -180,7 +179,7 @@ func (s *Server) Start() { } // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -194,7 +193,7 @@ func (s *Server) Start() { } // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -236,7 +235,7 @@ func (s *Server) Start() { // join to the existing cluster if !bootstrap { - client, err := grpc.NewClient(s.clusterConfig.PeerAddr) + client, err := NewGRPCClient(s.clusterConfig.PeerAddr) defer func() { err := client.Close() if err != nil { diff --git a/indexer/server_test.go 
b/indexer/server_test.go index 8a9f604..58071bc 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -23,15 +23,13 @@ import ( "testing" "time" - "github.com/hashicorp/raft" - "github.com/blevesearch/bleve" + "github.com/hashicorp/raft" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/testutils" ) @@ -119,7 +117,7 @@ func TestServer_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -137,7 +135,7 @@ func TestServer_LivenessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expLiveness := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness := index.LivenessProbeResponse_ALIVE.String() actLiveness := liveness if expLiveness != actLiveness { t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) @@ -186,7 +184,7 @@ func TestServer_ReadinessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -204,7 +202,7 @@ func TestServer_ReadinessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expReadiness := protobuf.ReadinessProbeResponse_READY.String() + expReadiness := index.ReadinessProbeResponse_READY.String() actReadiness := readiness if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) @@ -253,7 +251,7 @@ func TestServer_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -323,7 +321,7 @@ func TestServer_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -395,7 +393,7 @@ func TestServer_GetIndexMapping(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -470,7 +468,7 @@ func TestServer_GetIndexType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -542,7 +540,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -614,7 +612,7 @@ func TestServer_GetIndexStats(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -695,7 +693,7 @@ func TestServer_PutDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -788,7 +786,7 @@ func TestServer_GetDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil 
{ err = client.Close() @@ -896,7 +894,7 @@ func TestServer_DeleteDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -1033,7 +1031,7 @@ func TestServer_Search(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -1278,21 +1276,21 @@ func TestCluster_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() @@ -1305,7 +1303,7 @@ func TestCluster_LivenessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness1 := index.LivenessProbeResponse_ALIVE.String() actLiveness1 := liveness1 if expLiveness1 != actLiveness1 { t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) @@ -1316,7 +1314,7 @@ func TestCluster_LivenessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness2 := index.LivenessProbeResponse_ALIVE.String() actLiveness2 := liveness2 if expLiveness2 != actLiveness2 { t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) @@ -1327,7 +1325,7 @@ func 
TestCluster_LivenessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness3 := index.LivenessProbeResponse_ALIVE.String() actLiveness3 := liveness3 if expLiveness3 != actLiveness3 { t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) @@ -1415,21 +1413,21 @@ func TestCluster_ReadinessProbe(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() @@ -1442,7 +1440,7 @@ func TestCluster_ReadinessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness1 := index.ReadinessProbeResponse_READY.String() actReadiness1 := readiness1 if expReadiness1 != actReadiness1 { t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) @@ -1453,7 +1451,7 @@ func TestCluster_ReadinessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness2 := index.ReadinessProbeResponse_READY.String() actReadiness2 := readiness2 if expReadiness2 != actReadiness2 { t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) @@ -1464,7 +1462,7 @@ func TestCluster_ReadinessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness3 := index.ReadinessProbeResponse_READY.String() actReadiness3 
:= readiness3 if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) @@ -1552,21 +1550,21 @@ func TestCluster_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() @@ -1774,21 +1772,21 @@ func TestCluster_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() diff --git a/manager/grpc_client.go b/manager/grpc_client.go new file mode 100644 index 0000000..aecc95a --- /dev/null +++ b/manager/grpc_client.go @@ -0,0 +1,294 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "context" + "errors" + "math" + + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" + blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type GRPCClient struct { + ctx context.Context + cancel context.CancelFunc + conn *grpc.ClientConn + client management.ManagementClient +} + +func NewGRPCContext() (context.Context, context.CancelFunc) { + baseCtx := context.TODO() + //return context.WithTimeout(baseCtx, 60*time.Second) + return context.WithCancel(baseCtx) +} + +func NewGRPCClient(address string) (*GRPCClient, error) { + ctx, cancel := NewGRPCContext() + + //streamRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.Disable(), + //} + + //unaryRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), + // grpc_retry.WithCodes(codes.Unavailable), + // grpc_retry.WithMax(100), + //} + + dialOpts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt32), + grpc.MaxCallRecvMsgSize(math.MaxInt32), + ), + //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), + //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), + } + + conn, err := grpc.DialContext(ctx, address, dialOpts...) 
+ if err != nil { + return nil, err + } + + return &GRPCClient{ + ctx: ctx, + cancel: cancel, + conn: conn, + client: management.NewManagementClient(conn), + }, nil +} + +func (c *GRPCClient) Cancel() { + c.cancel() +} + +func (c *GRPCClient) Close() error { + c.Cancel() + if c.conn != nil { + return c.conn.Close() + } + + return c.ctx.Err() +} + +func (c *GRPCClient) GetAddress() string { + return c.conn.Target() +} + +func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return management.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { + resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return management.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + } + + return resp.State.String(), nil +} + +func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { + req := &management.GetNodeRequest{ + Id: id, + } + + resp, err := c.client.GetNode(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + + ins, err := protobuf.MarshalAny(resp.NodeConfig) + nodeConfig := *ins.(*map[string]interface{}) + + node := map[string]interface{}{ + "node_config": nodeConfig, + "state": resp.State, + } + + return node, nil +} + +func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { + nodeConfigAny := &any.Any{} + err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) + if err != nil { + return err + } + + req := &management.SetNodeRequest{ + Id: id, + NodeConfig: nodeConfigAny, + } + + _, err = c.client.SetNode(c.ctx, req, opts...) 
+ if err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { + req := &management.DeleteNodeRequest{ + Id: id, + } + + _, err := c.client.DeleteNode(c.ctx, req, opts...) + if err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { + resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) + if err != nil { + st, _ := status.FromError(err) + + return nil, errors.New(st.Message()) + } + + ins, err := protobuf.MarshalAny(resp.Cluster) + cluster := *ins.(*map[string]interface{}) + + return cluster, nil +} + +func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (management.Management_WatchClusterClient, error) { + req := &empty.Empty{} + + watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + return nil, errors.New(st.Message()) + } + + return watchClient, nil +} + +func (c *GRPCClient) GetValue(key string, opts ...grpc.CallOption) (interface{}, error) { + req := &management.GetValueRequest{ + Key: key, + } + + resp, err := c.client.GetValue(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + + switch st.Code() { + case codes.NotFound: + return nil, blasterrors.ErrNotFound + default: + return nil, errors.New(st.Message()) + } + } + + value, err := protobuf.MarshalAny(resp.Value) + + return value, nil +} + +func (c *GRPCClient) SetValue(key string, value interface{}, opts ...grpc.CallOption) error { + valueAny := &any.Any{} + err := protobuf.UnmarshalAny(value, valueAny) + if err != nil { + return err + } + + req := &management.SetValueRequest{ + Key: key, + Value: valueAny, + } + + _, err = c.client.SetValue(c.ctx, req, opts...) 
+ if err != nil { + st, _ := status.FromError(err) + + switch st.Code() { + case codes.NotFound: + return blasterrors.ErrNotFound + default: + return errors.New(st.Message()) + } + } + + return nil +} + +func (c *GRPCClient) DeleteValue(key string, opts ...grpc.CallOption) error { + req := &management.DeleteValueRequest{ + Key: key, + } + + _, err := c.client.DeleteValue(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + + switch st.Code() { + case codes.NotFound: + return blasterrors.ErrNotFound + default: + return errors.New(st.Message()) + } + } + + return nil +} + +func (c *GRPCClient) WatchStore(key string, opts ...grpc.CallOption) (management.Management_WatchStoreClient, error) { + req := &management.WatchStoreRequest{ + Key: key, + } + + watchClient, err := c.client.WatchStore(c.ctx, req, opts...) + if err != nil { + st, _ := status.FromError(err) + return nil, errors.New(st.Message()) + } + + return watchClient, nil +} + +func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { + _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) + if err != nil { + st, _ := status.FromError(err) + + return errors.New(st.Message()) + } + + return nil +} diff --git a/manager/grpc_server.go b/manager/grpc_server.go new file mode 100644 index 0000000..e49645b --- /dev/null +++ b/manager/grpc_server.go @@ -0,0 +1,94 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package manager + +import ( + "net" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/mosuka/blast/protobuf/management" + "go.uber.org/zap" + "google.golang.org/grpc" + //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" + //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" +) + +type GRPCServer struct { + service management.ManagementServer + server *grpc.Server + listener net.Listener + + logger *zap.Logger +} + +func NewGRPCServer(grpcAddr string, service management.ManagementServer, logger *zap.Logger) (*GRPCServer, error) { + server := grpc.NewServer( + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + //grpc_ctxtags.StreamServerInterceptor(), + //grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_zap.StreamServerInterceptor(logger), + //grpc_auth.StreamServerInterceptor(myAuthFunction), + //grpc_recovery.StreamServerInterceptor(), + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + //grpc_ctxtags.UnaryServerInterceptor(), + //grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_zap.UnaryServerInterceptor(logger), + //grpc_auth.UnaryServerInterceptor(myAuthFunction), + //grpc_recovery.UnaryServerInterceptor(), + )), + ) + + management.RegisterManagementServer(server, service) + + grpc_prometheus.EnableHandlingTimeHistogram() + grpc_prometheus.Register(server) + + listener, err := net.Listen("tcp", grpcAddr) + if err != nil { + return nil, err + } + + return &GRPCServer{ + service: service, + server: server, + listener: listener, + logger: logger, + }, nil +} + +func (s *GRPCServer) Start() error { + 
s.logger.Info("start server") + err := s.server.Serve(s.listener) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCServer) Stop() error { + s.logger.Info("stop server") + s.server.Stop() + //s.server.GracefulStop() + + return nil +} diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 3b6a0f4..e15848b 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -26,15 +26,15 @@ import ( "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type GRPCService struct { - *grpc.Service + //*grpc.Service raftServer *RaftServer logger *zap.Logger @@ -42,12 +42,12 @@ type GRPCService struct { updateClusterStopCh chan struct{} updateClusterDoneCh chan struct{} peers map[string]interface{} - peerClients map[string]*grpc.Client + peerClients map[string]*GRPCClient cluster map[string]interface{} - clusterChans map[chan protobuf.GetClusterResponse]struct{} + clusterChans map[chan management.GetClusterResponse]struct{} clusterMutex sync.RWMutex - stateChans map[chan protobuf.WatchStoreResponse]struct{} + stateChans map[chan management.WatchStoreResponse]struct{} stateMutex sync.RWMutex } @@ -57,11 +57,11 @@ func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, e logger: logger, peers: make(map[string]interface{}, 0), - peerClients: make(map[string]*grpc.Client, 0), + peerClients: make(map[string]*GRPCClient, 0), cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan protobuf.GetClusterResponse]struct{}), + clusterChans: make(map[chan management.GetClusterResponse]struct{}), - stateChans: make(map[chan protobuf.WatchStoreResponse]struct{}), + stateChans: make(map[chan management.WatchStoreResponse]struct{}), }, nil } @@ -79,8 +79,8 @@ func 
(s *GRPCService) Stop() error { return nil } -func (s *GRPCService) getLeaderClient() (*grpc.Client, error) { - var client *grpc.Client +func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { + var client *GRPCClient for id, node := range s.cluster { state, ok := node.(map[string]interface{})["state"].(string) @@ -170,7 +170,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := grpc.NewClient(grpcAddr) + newClient, err := NewGRPCClient(grpcAddr) if err != nil { s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -185,7 +185,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := grpc.NewClient(grpcAddr) + peerClient, err := NewGRPCClient(grpcAddr) if err != nil { s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) } @@ -220,7 +220,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // notify current cluster if !reflect.DeepEqual(s.cluster, cluster) { // convert to GetClusterResponse for channel output - clusterResp := &protobuf.GetClusterResponse{} + clusterResp := &management.GetClusterResponse{} clusterAny := &any.Any{} err = protobuf.UnmarshalAny(cluster, clusterAny) if err != nil { @@ -247,10 +247,11 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("close all peer clients") for id, client := range s.peerClients { s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } + _ = client.Close() + //err := client.Close() + //if err != nil { + // s.logger.Warn(err.Error()) + //} } if 
s.updateClusterStopCh != nil { @@ -263,6 +264,22 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("the cluster update has been stopped") } +func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*management.LivenessProbeResponse, error) { + resp := &management.LivenessProbeResponse{ + State: management.LivenessProbeResponse_ALIVE, + } + + return resp, nil +} + +func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*management.ReadinessProbeResponse, error) { + resp := &management.ReadinessProbeResponse{ + State: management.ReadinessProbeResponse_READY, + } + + return resp, nil +} + func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } @@ -316,8 +333,8 @@ func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { return nodeInfo, nil } -func (s *GRPCService) GetNode(ctx context.Context, req *protobuf.GetNodeRequest) (*protobuf.GetNodeResponse, error) { - resp := &protobuf.GetNodeResponse{} +func (s *GRPCService) GetNode(ctx context.Context, req *management.GetNodeRequest) (*management.GetNodeResponse, error) { + resp := &management.GetNodeResponse{} nodeInfo, err := s.getNode(req.Id) if err != nil { @@ -372,7 +389,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro return nil } -func (s *GRPCService) SetNode(ctx context.Context, req *protobuf.SetNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) SetNode(ctx context.Context, req *management.SetNodeRequest) (*empty.Empty, error) { resp := &empty.Empty{} ins, err := protobuf.MarshalAny(req.NodeConfig) @@ -416,7 +433,7 @@ func (s *GRPCService) deleteNode(id string) error { return nil } -func (s *GRPCService) DeleteNode(ctx context.Context, req *protobuf.DeleteNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) DeleteNode(ctx context.Context, req *management.DeleteNodeRequest) (*empty.Empty, error) { resp := &empty.Empty{} err := s.deleteNode(req.Id) @@ -453,8 +470,8 @@ func (s 
*GRPCService) getCluster() (map[string]interface{}, error) { return cluster, nil } -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protobuf.GetClusterResponse, error) { - resp := &protobuf.GetClusterResponse{} +func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*management.GetClusterResponse, error) { + resp := &management.GetClusterResponse{} cluster, err := s.getCluster() if err != nil { @@ -474,8 +491,8 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*protob return resp, nil } -func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_WatchClusterServer) error { - chans := make(chan protobuf.GetClusterResponse) +func (s *GRPCService) WatchCluster(req *empty.Empty, server management.Management_WatchClusterServer) error { + chans := make(chan management.GetClusterResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} @@ -499,30 +516,13 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server protobuf.Blast_Watch return nil } -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) GetValue(ctx context.Context, req *protobuf.GetValueRequest) (*protobuf.GetValueResponse, error) { +func (s *GRPCService) GetValue(ctx context.Context, req *management.GetValueRequest) (*management.GetValueResponse, error) { s.stateMutex.RLock() defer func() { s.stateMutex.RUnlock() }() - resp := &protobuf.GetValueResponse{} + resp := &management.GetValueResponse{} value, err := s.raftServer.GetValue(req.Key) if err != nil { @@ -547,7 +547,7 @@ func (s *GRPCService) GetValue(ctx context.Context, req *protobuf.GetValueReques return resp, nil } -func (s 
*GRPCService) SetValue(ctx context.Context, req *protobuf.SetValueRequest) (*empty.Empty, error) { +func (s *GRPCService) SetValue(ctx context.Context, req *management.SetValueRequest) (*empty.Empty, error) { s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() @@ -588,8 +588,8 @@ func (s *GRPCService) SetValue(ctx context.Context, req *protobuf.SetValueReques // notify for c := range s.stateChans { - c <- protobuf.WatchStoreResponse{ - Command: protobuf.WatchStoreResponse_SET, + c <- management.WatchStoreResponse{ + Command: management.WatchStoreResponse_SET, Key: req.Key, Value: req.Value, } @@ -598,7 +598,7 @@ func (s *GRPCService) SetValue(ctx context.Context, req *protobuf.SetValueReques return resp, nil } -func (s *GRPCService) DeleteValue(ctx context.Context, req *protobuf.DeleteValueRequest) (*empty.Empty, error) { +func (s *GRPCService) DeleteValue(ctx context.Context, req *management.DeleteValueRequest) (*empty.Empty, error) { s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() @@ -633,8 +633,8 @@ func (s *GRPCService) DeleteValue(ctx context.Context, req *protobuf.DeleteValue // notify for c := range s.stateChans { - c <- protobuf.WatchStoreResponse{ - Command: protobuf.WatchStoreResponse_DELETE, + c <- management.WatchStoreResponse{ + Command: management.WatchStoreResponse_DELETE, Key: req.Key, } } @@ -642,8 +642,8 @@ func (s *GRPCService) DeleteValue(ctx context.Context, req *protobuf.DeleteValue return resp, nil } -func (s *GRPCService) WatchStore(req *protobuf.WatchStoreRequest, server protobuf.Blast_WatchStoreServer) error { - chans := make(chan protobuf.WatchStoreResponse) +func (s *GRPCService) WatchStore(req *management.WatchStoreRequest, server management.Management_WatchStoreServer) error { + chans := make(chan management.WatchStoreResponse) s.stateMutex.Lock() s.stateChans[chans] = struct{}{} @@ -669,3 +669,20 @@ func (s *GRPCService) WatchStore(req *protobuf.WatchStoreRequest, server protobu return nil } + +func (s *GRPCService) 
Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + s.stateMutex.Lock() + defer func() { + s.stateMutex.Unlock() + }() + + resp := &empty.Empty{} + + err := s.raftServer.Snapshot() + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} diff --git a/manager/http_router.go b/manager/http_router.go index 969cccf..5ccea8c 100644 --- a/manager/http_router.go +++ b/manager/http_router.go @@ -22,19 +22,30 @@ import ( "github.com/gorilla/mux" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" blasthttp "github.com/mosuka/blast/http" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" ) -func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { - router, err := blasthttp.NewRouter(grpcAddr, logger) +type Router struct { + mux.Router + + GRPCClient *GRPCClient + logger *zap.Logger +} + +func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { + grpcClient, err := NewGRPCClient(grpcAddr) if err != nil { return nil, err } + router := &Router{ + GRPCClient: grpcClient, + logger: logger, + } + router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") @@ -49,6 +60,17 @@ func NewRouter(grpcAddr string, logger *zap.Logger) (*blasthttp.Router, error) { return router, nil } +func (r *Router) Close() error { + r.GRPCClient.Cancel() + + err := r.GRPCClient.Close() + if err != nil { + return err + } + + return nil +} + type RootHandler struct { logger *zap.Logger } @@ -80,11 +102,11 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type GetHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewGetHandler(client *grpc.Client, logger *zap.Logger) *GetHandler { +func NewGetHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { return &GetHandler{ client: client, logger: logger, 
@@ -148,11 +170,11 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type PutHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewPutHandler(client *grpc.Client, logger *zap.Logger) *PutHandler { +func NewPutHandler(client *GRPCClient, logger *zap.Logger) *PutHandler { return &PutHandler{ client: client, logger: logger, @@ -230,11 +252,11 @@ func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type DeleteHandler struct { - client *grpc.Client + client *GRPCClient logger *zap.Logger } -func NewDeleteHandler(client *grpc.Client, logger *zap.Logger) *DeleteHandler { +func NewDeleteHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { return &DeleteHandler{ client: client, logger: logger, diff --git a/http/router.go b/manager/http_server.go similarity index 52% rename from http/router.go rename to manager/http_server.go index 40a9a92..33bd0fc 100644 --- a/http/router.go +++ b/manager/http_server.go @@ -12,39 +12,55 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package http +package manager import ( - "github.com/gorilla/mux" - "github.com/mosuka/blast/grpc" + "net" + "net/http" + + accesslog "github.com/mash/go-accesslog" "go.uber.org/zap" ) -type Router struct { - mux.Router +type HTTPServer struct { + listener net.Listener + router *Router - GRPCClient *grpc.Client logger *zap.Logger + httpLogger accesslog.Logger } -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := grpc.NewClient(grpcAddr) +func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { + listener, err := net.Listen("tcp", httpAddr) if err != nil { return nil, err } - router := &Router{ - GRPCClient: grpcClient, + return &HTTPServer{ + listener: listener, + router: router, logger: logger, + httpLogger: httpLogger, + }, nil +} + +func (s *HTTPServer) Start() error { + err := http.Serve( + s.listener, + accesslog.NewLoggingHandler( + s.router, + s.httpLogger, + ), + ) + if err != nil { + return err } - return router, nil + return nil } -func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() +func (s *HTTPServer) Stop() error { + err := s.listener.Close() if err != nil { return err } diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index 1107951..8bb2c97 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -49,7 +49,7 @@ func TestRaftFSM_GetNode(t *testing.T) { } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } _ = fsm.SetNodeConfig("node1", map[string]interface{}{ @@ -70,7 +70,7 @@ func TestRaftFSM_GetNode(t *testing.T) { val1, err := fsm.GetNodeConfig("node2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := map[string]interface{}{ @@ -80,7 +80,7 @@ func TestRaftFSM_GetNode(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", 
exp1, act1) } } @@ -88,12 +88,12 @@ func TestRaftFSM_GetNode(t *testing.T) { func TestRaftFSM_SetNode(t *testing.T) { tmp, err := ioutil.TempDir("", "") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { err := os.RemoveAll(tmp) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() @@ -101,17 +101,17 @@ func TestRaftFSM_SetNode(t *testing.T) { fsm, err := NewRaftFSM(tmp, logger) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.Start() defer func() { err := fsm.Stop() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } _ = fsm.SetNodeConfig("node1", map[string]interface{}{ @@ -132,7 +132,7 @@ func TestRaftFSM_SetNode(t *testing.T) { val1, err := fsm.GetNodeConfig("node2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := map[string]interface{}{ "bind_addr": ":16061", @@ -141,7 +141,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } _ = fsm.SetNodeConfig("node2", map[string]interface{}{ @@ -153,7 +153,7 @@ func TestRaftFSM_SetNode(t *testing.T) { val2, err := fsm.GetNodeConfig("node2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := map[string]interface{}{ "bind_addr": ":16061", @@ -163,19 +163,19 @@ func TestRaftFSM_SetNode(t *testing.T) { } act2 := val2 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } } func TestRaftFSM_DeleteNode(t *testing.T) { tmp, err := ioutil.TempDir("", "") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { err := os.RemoveAll(tmp) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() @@ -183,17 +183,17 @@ func TestRaftFSM_DeleteNode(t *testing.T) { 
fsm, err := NewRaftFSM(tmp, logger) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.Start() defer func() { err := fsm.Stop() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } _ = fsm.SetNodeConfig("node1", map[string]interface{}{ @@ -214,7 +214,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { val1, err := fsm.GetNodeConfig("node2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := map[string]interface{}{ "bind_addr": ":16061", @@ -223,34 +223,34 @@ func TestRaftFSM_DeleteNode(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } err = fsm.DeleteNodeConfig("node2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val2, err := fsm.GetNodeConfig("node2") if err == nil { - t.Errorf("expected error: %v", err) + t.Fatalf("expected error: %v", err) } act1 = val2 if reflect.DeepEqual(nil, act1) { - t.Errorf("expected content to see nil, saw %v", act1) + t.Fatalf("expected content to see nil, saw %v", act1) } } func TestRaftFSM_Get(t *testing.T) { tmp, err := ioutil.TempDir("", "") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { err := os.RemoveAll(tmp) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() @@ -258,45 +258,45 @@ func TestRaftFSM_Get(t *testing.T) { fsm, err := NewRaftFSM(tmp, logger) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.Start() defer func() { err := fsm.Stop() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } value, err := fsm.GetValue("/a") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedValue 
:= 1 actualValue := value if expectedValue != actualValue { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } func TestRaftFSM_Set(t *testing.T) { tmp, err := ioutil.TempDir("", "") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { err := os.RemoveAll(tmp) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() @@ -304,17 +304,17 @@ func TestRaftFSM_Set(t *testing.T) { fsm, err := NewRaftFSM(tmp, logger) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.Start() defer func() { err := fsm.Stop() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // set {"a": 1} @@ -322,18 +322,18 @@ func TestRaftFSM_Set(t *testing.T) { "a": 1, }, false) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val1, err := fsm.GetValue("/") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := map[string]interface{}{ "a": 1, } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } // merge {"a": "A"} @@ -341,18 +341,18 @@ func TestRaftFSM_Set(t *testing.T) { "a": "A", }, true) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val2, err := fsm.GetValue("/") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := map[string]interface{}{ "a": "A", } act2 := val2 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } // set {"a": {"b": "AB"}} @@ -362,12 +362,12 @@ func TestRaftFSM_Set(t *testing.T) { }, }, false) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val3, err := fsm.GetValue("/") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp3 := 
map[string]interface{}{ "a": map[string]interface{}{ @@ -376,7 +376,7 @@ func TestRaftFSM_Set(t *testing.T) { } act3 := val3 if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + t.Fatalf("expected content to see %v, saw %v", exp3, act3) } // merge {"a": {"c": "AC"}} @@ -386,11 +386,11 @@ func TestRaftFSM_Set(t *testing.T) { }, }, true) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val4, err := fsm.GetValue("/") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp4 := map[string]interface{}{ "a": map[string]interface{}{ @@ -400,7 +400,7 @@ func TestRaftFSM_Set(t *testing.T) { } act4 := val4 if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) + t.Fatalf("expected content to see %v, saw %v", exp4, act4) } // set {"a": 1} @@ -408,18 +408,18 @@ func TestRaftFSM_Set(t *testing.T) { "a": 1, }, false) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val5, err := fsm.GetValue("/") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp5 := map[string]interface{}{ "a": 1, } act5 := val5 if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) + t.Fatalf("expected content to see %v, saw %v", exp5, act5) } // TODO: merge {"a": {"c": "AC"}} @@ -430,7 +430,7 @@ func TestRaftFSM_Set(t *testing.T) { //}, true) //val6, err := fsm.Get("/") //if err != nil { - // t.Errorf("%v", err) + // t.Fatalf("%v", err) //} //exp6 := map[string]interface{}{ // "a": map[string]interface{}{ @@ -439,19 +439,19 @@ func TestRaftFSM_Set(t *testing.T) { //} //act6 := val6 //if !reflect.DeepEqual(exp6, act6) { - // t.Errorf("expected content to see %v, saw %v", exp6, act6) + // t.Fatalf("expected content to see %v, saw %v", exp6, act6) //} } func TestRaftFSM_Delete(t *testing.T) { tmp, err := ioutil.TempDir("", "") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } defer func() { err := os.RemoveAll(tmp) if err != 
nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() @@ -459,47 +459,47 @@ func TestRaftFSM_Delete(t *testing.T) { fsm, err := NewRaftFSM(tmp, logger) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.Start() defer func() { err := fsm.Stop() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } value, err := fsm.GetValue("/a") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedValue := 1 actualValue := value if expectedValue != actualValue { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } err = fsm.DeleteValue("/a") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } value, err = fsm.GetValue("/a") if err == nil { - t.Errorf("expected nil: %v", err) + t.Fatalf("expected nil: %v", err) } actualValue = value if nil != actualValue { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } diff --git a/manager/server.go b/manager/server.go index bbef34b..8dae8d5 100644 --- a/manager/server.go +++ b/manager/server.go @@ -17,8 +17,6 @@ package manager import ( accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" - "github.com/mosuka/blast/grpc" - "github.com/mosuka/blast/http" "go.uber.org/zap" ) @@ -32,9 +30,9 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService - grpcServer *grpc.Server - httpRouter *http.Router - httpServer *http.Server + grpcServer *GRPCServer + httpRouter *Router + httpServer *HTTPServer } func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger 
accesslog.Logger) (*Server, error) { @@ -70,7 +68,7 @@ func (s *Server) Start() { } // create gRPC server - s.grpcServer, err = grpc.NewServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -84,7 +82,7 @@ func (s *Server) Start() { } // create HTTP server - s.httpServer, err = http.NewServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Error(err.Error()) return @@ -126,7 +124,7 @@ func (s *Server) Start() { // join to the existing cluster if !bootstrap { - client, err := grpc.NewClient(s.clusterConfig.PeerAddr) + client, err := NewGRPCClient(s.clusterConfig.PeerAddr) defer func() { err := client.Close() if err != nil { diff --git a/manager/server_test.go b/manager/server_test.go index bc6113f..9aca154 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -24,10 +24,9 @@ import ( "github.com/hashicorp/raft" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/grpc" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/testutils" ) @@ -121,7 +120,7 @@ func TestServer_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -139,7 +138,7 @@ func TestServer_LivenessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expLiveness := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness := management.LivenessProbeResponse_ALIVE.String() actLiveness := liveness if expLiveness != actLiveness { 
t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) @@ -191,7 +190,7 @@ func TestServer_ReadinessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -209,7 +208,7 @@ func TestServer_ReadinessProbe(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expReadiness := protobuf.ReadinessProbeResponse_READY.String() + expReadiness := management.ReadinessProbeResponse_READY.String() actReadiness := readiness if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) @@ -261,7 +260,7 @@ func TestServer_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -277,7 +276,7 @@ func TestServer_GetNode(t *testing.T) { // get node nodeInfo, err := client.GetNode(nodeConfig.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNodeInfo := map[string]interface{}{ "node_config": nodeConfig.ToMap(), @@ -285,7 +284,7 @@ func TestServer_GetNode(t *testing.T) { } actNodeInfo := nodeInfo if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Errorf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) + t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } } @@ -334,7 +333,7 @@ func TestServer_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -350,7 +349,7 @@ func TestServer_GetCluster(t *testing.T) { // get cluster cluster, err := client.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } 
expCluster := map[string]interface{}{ nodeConfig.NodeId: map[string]interface{}{ @@ -360,7 +359,7 @@ func TestServer_GetCluster(t *testing.T) { } actCluster := cluster if !reflect.DeepEqual(expCluster, actCluster) { - t.Errorf("expected content to see %v, saw %v", expCluster, actCluster) + t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } } @@ -409,7 +408,7 @@ func TestServer_GetIndexMapping(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -438,7 +437,7 @@ func TestServer_GetIndexMapping(t *testing.T) { } if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Errorf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) + t.Fatalf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) } } @@ -487,7 +486,7 @@ func TestServer_GetIndexType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -502,16 +501,16 @@ func TestServer_GetIndexType(t *testing.T) { expIndexType := indexConfig.IndexType if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } actIndexType, err := client.GetValue("index_config/index_type") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if expIndexType != *actIndexType.(*string) { - t.Errorf("expected content to see %v, saw %v", expIndexType, *actIndexType.(*string)) + t.Fatalf("expected content to see %v, saw %v", expIndexType, *actIndexType.(*string)) } } @@ -560,7 +559,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err 
= client.Close() @@ -575,16 +574,16 @@ func TestServer_GetIndexStorageType(t *testing.T) { expIndexStorageType := indexConfig.IndexStorageType if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } actIndexStorageType, err := client.GetValue("index_config/index_storage_type") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if expIndexStorageType != *actIndexStorageType.(*string) { - t.Errorf("expected content to see %v, saw %v", expIndexStorageType, *actIndexStorageType.(*string)) + t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, *actIndexStorageType.(*string)) } } @@ -633,7 +632,7 @@ func TestServer_SetState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -649,13 +648,13 @@ func TestServer_SetState(t *testing.T) { // set value err = client.SetValue("test/key1", "val1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get value val1, err := client.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal1 := "val1" @@ -663,7 +662,7 @@ func TestServer_SetState(t *testing.T) { actVal1 := *val1.(*string) if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) + t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } @@ -712,7 +711,7 @@ func TestServer_GetState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -728,13 +727,13 @@ func TestServer_GetState(t *testing.T) { // set value err = client.SetValue("test/key1", "val1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get value val1, err := client.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + 
t.Fatalf("%v", err) } expVal1 := "val1" @@ -742,7 +741,7 @@ func TestServer_GetState(t *testing.T) { actVal1 := *val1.(*string) if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) + t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } @@ -791,7 +790,7 @@ func TestServer_DeleteState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := grpc.NewClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(nodeConfig.GRPCAddr) defer func() { if client != nil { err = client.Close() @@ -807,13 +806,13 @@ func TestServer_DeleteState(t *testing.T) { // set value err = client.SetValue("test/key1", "val1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get value val1, err := client.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal1 := "val1" @@ -821,28 +820,28 @@ func TestServer_DeleteState(t *testing.T) { actVal1 := *val1.(*string) if expVal1 != actVal1 { - t.Errorf("expected content to see %v, saw %v", expVal1, actVal1) + t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } // delete value err = client.DeleteValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val1, err = client.GetValue("test/key1") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val1 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // delete non-existing data err = client.DeleteValue("test/non-existing") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } } @@ -1008,59 +1007,59 @@ func TestCluster_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + 
client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // liveness check for manager1 liveness1, err := client1.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expLiveness1 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness1 := management.LivenessProbeResponse_ALIVE.String() actLiveness1 := liveness1 if expLiveness1 != actLiveness1 { - t.Errorf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // liveness check for manager2 liveness2, err := client2.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expLiveness2 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness2 := management.LivenessProbeResponse_ALIVE.String() actLiveness2 := liveness2 if expLiveness2 != actLiveness2 { - t.Errorf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // liveness check for manager3 liveness3, err := client3.LivenessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expLiveness3 := protobuf.LivenessProbeResponse_ALIVE.String() + expLiveness3 := management.LivenessProbeResponse_ALIVE.String() actLiveness3 := liveness3 if expLiveness3 != actLiveness3 { - t.Errorf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } } @@ -1145,59 +1144,59 @@ func TestCluster_ReadinessProbe(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + 
client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // readiness check for manager1 readiness1, err := client1.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expReadiness1 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness1 := management.ReadinessProbeResponse_READY.String() actReadiness1 := readiness1 if expReadiness1 != actReadiness1 { - t.Errorf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } // readiness check for manager2 readiness2, err := client2.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expReadiness2 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness2 := management.ReadinessProbeResponse_READY.String() actReadiness2 := readiness2 if expReadiness2 != actReadiness2 { - t.Errorf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // readiness check for manager3 readiness3, err := client3.ReadinessProbe() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - expReadiness3 := protobuf.ReadinessProbeResponse_READY.String() + expReadiness3 := management.ReadinessProbeResponse_READY.String() actReadiness3 := readiness3 if expReadiness3 != actReadiness3 { - t.Errorf("expected content to see %v, saw %v", expReadiness3, actReadiness3) + 
t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } } @@ -1282,32 +1281,32 @@ func TestCluster_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get all node info from all nodes node11, err := client1.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode11 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1315,12 +1314,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode11 := node11 if !reflect.DeepEqual(expNode11, actNode11) { - t.Errorf("expected content to see %v, saw %v", expNode11, actNode11) + t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } node12, err := client1.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode12 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1328,12 +1327,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode12 := node12 if !reflect.DeepEqual(expNode12, actNode12) { - t.Errorf("expected content to see %v, saw %v", expNode12, actNode12) + t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) } node13, err := client1.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode13 := map[string]interface{}{ "node_config": 
server3.nodeConfig.ToMap(), @@ -1341,12 +1340,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode13 := node13 if !reflect.DeepEqual(expNode13, actNode13) { - t.Errorf("expected content to see %v, saw %v", expNode13, actNode13) + t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) } node21, err := client2.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode21 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1354,12 +1353,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { - t.Errorf("expected content to see %v, saw %v", expNode21, actNode21) + t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } node22, err := client2.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode22 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1367,12 +1366,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode22 := node22 if !reflect.DeepEqual(expNode22, actNode22) { - t.Errorf("expected content to see %v, saw %v", expNode22, actNode22) + t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) } node23, err := client2.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode23 := map[string]interface{}{ "node_config": server3.nodeConfig.ToMap(), @@ -1380,12 +1379,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode23 := node23 if !reflect.DeepEqual(expNode23, actNode23) { - t.Errorf("expected content to see %v, saw %v", expNode23, actNode23) + t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) } node31, err := client3.GetNode(nodeConfig1.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode31 := map[string]interface{}{ "node_config": server1.nodeConfig.ToMap(), @@ -1393,12 +1392,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode31 := node31 if 
!reflect.DeepEqual(expNode31, actNode31) { - t.Errorf("expected content to see %v, saw %v", expNode31, actNode31) + t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } node32, err := client3.GetNode(nodeConfig2.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode32 := map[string]interface{}{ "node_config": server2.nodeConfig.ToMap(), @@ -1406,12 +1405,12 @@ func TestCluster_GetNode(t *testing.T) { } actNode32 := node32 if !reflect.DeepEqual(expNode32, actNode32) { - t.Errorf("expected content to see %v, saw %v", expNode32, actNode32) + t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) } node33, err := client3.GetNode(nodeConfig3.NodeId) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expNode33 := map[string]interface{}{ "node_config": server3.nodeConfig.ToMap(), @@ -1419,7 +1418,7 @@ func TestCluster_GetNode(t *testing.T) { } actNode33 := node33 if !reflect.DeepEqual(expNode33, actNode33) { - t.Errorf("expected content to see %v, saw %v", expNode33, actNode33) + t.Fatalf("expected content to see %v, saw %v", expNode33, actNode33) } } @@ -1504,32 +1503,32 @@ func TestCluster_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get cluster info from manager1 cluster1, err := client1.GetCluster() if err != nil { - t.Errorf("%v", err) + 
t.Fatalf("%v", err) } expCluster1 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1547,12 +1546,12 @@ func TestCluster_GetCluster(t *testing.T) { } actCluster1 := cluster1 if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Errorf("expected content to see %v, saw %v", expCluster1, actCluster1) + t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } cluster2, err := client2.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster2 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1570,12 +1569,12 @@ func TestCluster_GetCluster(t *testing.T) { } actCluster2 := cluster2 if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Errorf("expected content to see %v, saw %v", expCluster2, actCluster2) + t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } cluster3, err := client3.GetCluster() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expCluster3 := map[string]interface{}{ nodeConfig1.NodeId: map[string]interface{}{ @@ -1593,7 +1592,7 @@ func TestCluster_GetCluster(t *testing.T) { } actCluster3 := cluster3 if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Errorf("expected content to see %v, saw %v", expCluster3, actCluster3) + t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } } @@ -1678,57 +1677,57 @@ func TestCluster_GetState(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := 
NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // get index mapping from all nodes indexConfig1, err := client1.GetValue("index_config") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expIndexConfig1 := indexConfig.ToMap() actIndexConfig1 := *indexConfig1.(*map[string]interface{}) if !reflect.DeepEqual(expIndexConfig1, actIndexConfig1) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig1, actIndexConfig1) + t.Fatalf("expected content to see %v, saw %v", expIndexConfig1, actIndexConfig1) } indexConfig2, err := client2.GetValue("index_config") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expIndexConfig2 := indexConfig.ToMap() actIndexConfig2 := *indexConfig2.(*map[string]interface{}) if !reflect.DeepEqual(expIndexConfig2, actIndexConfig2) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig2, actIndexConfig2) + t.Fatalf("expected content to see %v, saw %v", expIndexConfig2, actIndexConfig2) } indexConfig3, err := client3.GetValue("index_config") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expIndexConfig3 := indexConfig.ToMap() actIndexConfig3 := *indexConfig3.(*map[string]interface{}) if !reflect.DeepEqual(expIndexConfig3, actIndexConfig3) { - t.Errorf("expected content to see %v, saw %v", expIndexConfig3, actIndexConfig3) + t.Fatalf("expected content to see %v, saw %v", expIndexConfig3, actIndexConfig3) } } @@ -1813,131 +1812,131 @@ func TestCluster_SetState(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - 
t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = client1.SetValue("test/key1", "val1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val11, err := client1.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) if expVal11 != actVal11 { - t.Errorf("expected content to see %v, saw %v", expVal11, actVal11) + t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } val21, err := client2.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) if expVal21 != actVal21 { - t.Errorf("expected content to see %v, saw %v", expVal21, actVal21) + t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } val31, err := client3.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) if expVal31 != actVal31 { - t.Errorf("expected content to see %v, saw %v", expVal31, actVal31) + t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } err = client2.SetValue("test/key2", "val2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val12, err := client1.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) if expVal12 != actVal12 { - t.Errorf("expected content to see %v, saw %v", expVal12, actVal12) + t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } val22, err := client2.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) 
+ t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) if expVal22 != actVal22 { - t.Errorf("expected content to see %v, saw %v", expVal22, actVal22) + t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } val32, err := client3.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) if expVal32 != actVal32 { - t.Errorf("expected content to see %v, saw %v", expVal32, actVal32) + t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } err = client3.SetValue("test/key3", "val3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val13, err := client1.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) if expVal13 != actVal13 { - t.Errorf("expected content to see %v, saw %v", expVal13, actVal13) + t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } val23, err := client2.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) if expVal23 != actVal23 { - t.Errorf("expected content to see %v, saw %v", expVal23, actVal23) + t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } val33, err := client3.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) if expVal33 != actVal33 { - t.Errorf("expected content to see %v, saw %v", expVal33, actVal33) + t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } } @@ -2022,237 +2021,237 @@ func TestCluster_DeleteState(t *testing.T) { time.Sleep(5 * time.Second) // gRPC client for manager1 - client1, err := grpc.NewClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) defer func() { _ = client1.Close() }() if err != 
nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client2, err := grpc.NewClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) defer func() { _ = client2.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } - client3, err := grpc.NewClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) defer func() { _ = client3.Close() }() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // set test data before delete err = client1.SetValue("test/key1", "val1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val11, err := client1.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) if expVal11 != actVal11 { - t.Errorf("expected content to see %v, saw %v", expVal11, actVal11) + t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } val21, err := client2.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) if expVal21 != actVal21 { - t.Errorf("expected content to see %v, saw %v", expVal21, actVal21) + t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } val31, err := client3.GetValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) if expVal31 != actVal31 { - t.Errorf("expected content to see %v, saw %v", expVal31, actVal31) + t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } err = client2.SetValue("test/key2", "val2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val12, err := client1.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := 
*val12.(*string) if expVal12 != actVal12 { - t.Errorf("expected content to see %v, saw %v", expVal12, actVal12) + t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } val22, err := client2.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) if expVal22 != actVal22 { - t.Errorf("expected content to see %v, saw %v", expVal22, actVal22) + t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } val32, err := client3.GetValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) if expVal32 != actVal32 { - t.Errorf("expected content to see %v, saw %v", expVal32, actVal32) + t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } err = client3.SetValue("test/key3", "val3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val13, err := client1.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) if expVal13 != actVal13 { - t.Errorf("expected content to see %v, saw %v", expVal13, actVal13) + t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } val23, err := client2.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) if expVal23 != actVal23 { - t.Errorf("expected content to see %v, saw %v", expVal23, actVal23) + t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } val33, err := client3.GetValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) if expVal33 != actVal33 { - t.Errorf("expected content to see %v, saw %v", expVal33, actVal33) + t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } // delete err = 
client1.DeleteValue("test/key1") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val11, err = client1.GetValue("test/key1") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val11 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val21, err = client2.GetValue("test/key1") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val21 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val31, err = client3.GetValue("test/key1") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val31 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = client2.DeleteValue("test/key2") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val12, err = client1.GetValue("test/key2") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val12 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val22, err = client2.GetValue("test/key2") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val22 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val32, err = client3.GetValue("test/key2") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val32 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } err = client3.DeleteValue("test/key3") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes val13, err = client1.GetValue("test/key3") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val13 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val23, err = client2.GetValue("test/key3") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", 
err) } if val23 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } val33, err = client3.GetValue("test/key3") if err != blasterrors.ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } if val33 != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // delete non-existing data from manager1 err = client1.DeleteValue("test/non-existing") if err == nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // delete non-existing data from manager2 err = client2.DeleteValue("test/non-existing") if err == nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } // delete non-existing data from manager3 err = client3.DeleteValue("test/non-existing") if err == nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } } diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go index c6c175d..9fb2bcb 100644 --- a/maputils/maputils_test.go +++ b/maputils/maputils_test.go @@ -26,7 +26,7 @@ func Test_splitKey(t *testing.T) { exp1 := []string{"a", "b", "c", "d"} act1 := keys1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } key2 := "/" @@ -34,7 +34,7 @@ func Test_splitKey(t *testing.T) { exp2 := make([]string, 0) act2 := keys2 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } key3 := "" @@ -42,7 +42,7 @@ func Test_splitKey(t *testing.T) { exp3 := make([]string, 0) act3 := keys3 if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + t.Fatalf("expected content to see %v, saw %v", exp3, act3) } } @@ -52,7 +52,7 @@ func Test_makeSelector(t *testing.T) { exp1 := "a.b.c.d" act1 := selector1 if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } key2 := "/" @@ -60,7 +60,7 @@ func Test_makeSelector(t *testing.T) { exp2 := "" act2 := 
selector2 if exp2 != act2 { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } key3 := "" @@ -68,7 +68,7 @@ func Test_makeSelector(t *testing.T) { exp3 := "" act3 := selector3 if exp3 != act3 { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + t.Fatalf("expected content to see %v, saw %v", exp3, act3) } } @@ -100,7 +100,7 @@ func Test_normalize(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -115,7 +115,7 @@ func Test_makeMap(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } val2 := makeMap("a/b", map[string]interface{}{"c": "C"}).(Map) @@ -128,7 +128,7 @@ func Test_makeMap(t *testing.T) { } act2 := val2 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } } @@ -159,7 +159,7 @@ func TestMap_FromMap(t *testing.T) { } act1 := map1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -191,7 +191,7 @@ func TestMap_ToMap(t *testing.T) { } act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -205,7 +205,7 @@ func Test_FromYAML(t *testing.T) { - ae2 `)) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := Map{ "a": Map{ @@ -221,7 +221,7 @@ func Test_FromYAML(t *testing.T) { } act1 := map1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ 
-241,7 +241,7 @@ func Test_ToYAML(t *testing.T) { val1, err := map1.ToYAML() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := []byte(`a: b: @@ -253,14 +253,14 @@ func Test_ToYAML(t *testing.T) { `) act1 := val1 if !bytes.Equal(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } func Test_FromJSON(t *testing.T) { map1, err := FromJSON([]byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`)) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := Map{ "a": Map{ @@ -276,7 +276,7 @@ func Test_FromJSON(t *testing.T) { } act1 := map1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -295,12 +295,12 @@ func Test_ToJSON(t *testing.T) { } val1, err := map1.ToJSON() if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := []byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`) act1 := val1 if !bytes.Equal(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -320,22 +320,22 @@ func Test_Has(t *testing.T) { val1, err := map1.Has("a/b/c") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := true act1 := val1 if exp1 != act1 { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } val2, err := map1.Get("a/b/f") if err != ErrNotFound { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := false act2 := val2 if exp2 == act2 { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } } @@ -344,55 +344,55 @@ func Test_Set(t *testing.T) { err := map1.Set("/", Map{"a": "A"}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := Map{ "a": "A", } act1 := map1 if 
!reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } err = map1.Set("/", Map{"A": "a"}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := Map{ "A": "a", } act2 := map1 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } err = map1.Set("/", Map{"A": 1}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp3 := Map{ "A": 1, } act3 := map1 if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } err = map1.Set("/A", "AAA") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp4 := Map{ "A": "AAA", } act4 := map1 if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) + t.Fatalf("expected content to see %v, saw %v", exp4, act4) } err = map1.Set("/B", "BBB") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp5 := Map{ "A": "AAA", @@ -400,12 +400,12 @@ func Test_Set(t *testing.T) { } act5 := map1 if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) + t.Fatalf("expected content to see %v, saw %v", exp5, act5) } err = map1.Set("/C", map[string]interface{}{"D": "CCC-DDD"}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp6 := Map{ "A": "AAA", @@ -416,7 +416,7 @@ func Test_Set(t *testing.T) { } act6 := map1 if !reflect.DeepEqual(exp6, act6) { - t.Errorf("expected content to see %v, saw %v", exp6, act6) + t.Fatalf("expected content to see %v, saw %v", exp6, act6) } } @@ -425,43 +425,43 @@ func Test_Merge(t *testing.T) { err := map1.Merge("/", Map{"a": "A"}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := Map{ "a": "A", } act1 := map1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected 
content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } err = map1.Merge("/a", "a") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := Map{ "a": "a", } act2 := map1 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected content to see %v, saw %v", exp2, act2) } err = map1.Merge("/", Map{"a": 1}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp3 := Map{ "a": 1, } act3 := map1 if !reflect.DeepEqual(exp3, act3) { - t.Errorf("expected content to see %v, saw %v", exp3, act3) + t.Fatalf("expected content to see %v, saw %v", exp3, act3) } err = map1.Merge("/", Map{"b": 2}) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp4 := Map{ "a": 1, @@ -469,12 +469,12 @@ func Test_Merge(t *testing.T) { } act4 := map1 if !reflect.DeepEqual(exp4, act4) { - t.Errorf("expected content to see %v, saw %v", exp4, act4) + t.Fatalf("expected content to see %v, saw %v", exp4, act4) } err = map1.Merge("/c", 3) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp5 := Map{ "a": 1, @@ -483,7 +483,7 @@ func Test_Merge(t *testing.T) { } act5 := map1 if !reflect.DeepEqual(exp5, act5) { - t.Errorf("expected content to see %v, saw %v", exp5, act5) + t.Fatalf("expected content to see %v, saw %v", exp5, act5) } } @@ -504,17 +504,17 @@ func Test_Get(t *testing.T) { val1, err := map1.Get("a/b/c") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := "abc" act1 := val1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } val2, err := map1.Get("a") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp2 := Map{ "b": Map{ @@ -528,7 +528,7 @@ func Test_Get(t *testing.T) { } act2 := val2 if !reflect.DeepEqual(exp2, act2) { - t.Errorf("expected content to see %v, saw %v", exp2, act2) + t.Fatalf("expected 
content to see %v, saw %v", exp2, act2) } } @@ -548,7 +548,7 @@ func Test_Delete(t *testing.T) { err := map1.Delete("a/b/c") if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } exp1 := Map{ "a": Map{ @@ -563,7 +563,7 @@ func Test_Delete(t *testing.T) { } act1 := map1 if !reflect.DeepEqual(exp1, act1) { - t.Errorf("expected content to see %v, saw %v", exp1, act1) + t.Fatalf("expected content to see %v, saw %v", exp1, act1) } } @@ -584,7 +584,7 @@ func Test_Delete(t *testing.T) { // key1 := "/" // val1, err := Get(data1, key1) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // exp1 := map[string]interface{}{ // "a": map[string]interface{}{ @@ -600,13 +600,13 @@ func Test_Delete(t *testing.T) { // } // act1 := val1 // if !reflect.DeepEqual(exp1, act1) { -// t.Errorf("expected content to see %v, saw %v", exp1, act1) +// t.Fatalf("expected content to see %v, saw %v", exp1, act1) // } // // key2 := "/a" // val2, err := Get(data1, key2) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // exp2 := map[string]interface{}{ // "b": map[string]interface{}{ @@ -620,7 +620,7 @@ func Test_Delete(t *testing.T) { // } // act2 := val2 // if !reflect.DeepEqual(exp2, act2) { -// t.Errorf("expected content to see %v, saw %v", exp2, act2) +// t.Fatalf("expected content to see %v, saw %v", exp2, act2) // } //} @@ -629,51 +629,51 @@ func Test_Delete(t *testing.T) { // // data, err := Set(data, "/", map[string]interface{}{"a": 1}, true) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // exp1 := 1 // act1 := val1 // if exp1 != act1 { -// t.Errorf("expected content to see %v, saw %v", exp1, act1) +// t.Fatalf("expected content to see %v, saw %v", exp1, act1) // } // // fsm.applySet("/b/bb", map[string]interface{}{"b": 1}, false) // // val2, err := fsm.Get("/b") // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // exp2 := map[string]interface{}{"bb": map[string]interface{}{"b": 1}} // 
act2 := val2.(map[string]interface{}) // if !reflect.DeepEqual(exp2, act2) { -// t.Errorf("expected content to see %v, saw %v", exp2, act2) +// t.Fatalf("expected content to see %v, saw %v", exp2, act2) // } // // fsm.applySet("/", map[string]interface{}{"a": 1}, false) // // val3, err := fsm.Get("/") // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // exp3 := map[string]interface{}{"a": 1} // act3 := val3 // if !reflect.DeepEqual(exp3, act3) { -// t.Errorf("expected content to see %v, saw %v", exp3, act3) +// t.Fatalf("expected content to see %v, saw %v", exp3, act3) // } // // fsm.applySet("/", map[string]interface{}{"b": 2}, true) // // val4, err := fsm.Get("/") // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // exp4 := map[string]interface{}{"a": 1, "b": 2} // act4 := val4 // if !reflect.DeepEqual(exp4, act4) { -// t.Errorf("expected content to see %v, saw %v", exp4, act4) +// t.Fatalf("expected content to see %v, saw %v", exp4, act4) // } //} diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go new file mode 100644 index 0000000..b935dea --- /dev/null +++ b/protobuf/distribute/distribute.pb.go @@ -0,0 +1,843 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: protobuf/distribute/distribute.proto + +package distribute + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LivenessProbeResponse_State int32 + +const ( + LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 + LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 + LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 +) + +var LivenessProbeResponse_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} + +var LivenessProbeResponse_State_value = map[string]int32{ + "UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x LivenessProbeResponse_State) String() string { + return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +} + +func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{0, 0} +} + +type ReadinessProbeResponse_State int32 + +const ( + ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 + ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 + ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 +) + +var ReadinessProbeResponse_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READY", + 2: "NOT_READY", +} + +var ReadinessProbeResponse_State_value = map[string]int32{ + "UNKNOWN": 0, + "READY": 1, + "NOT_READY": 2, +} + +func (x ReadinessProbeResponse_State) String() string { + return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +} + +func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{1, 0} +} + +// use for health check +type LivenessProbeResponse struct { + State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.LivenessProbeResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } +func (m *LivenessProbeResponse) String() string { return 
proto.CompactTextString(m) } +func (*LivenessProbeResponse) ProtoMessage() {} +func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{0} +} + +func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +} +func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +} +func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +} +func (m *LivenessProbeResponse) XXX_Size() int { + return xxx_messageInfo_LivenessProbeResponse.Size(m) +} +func (m *LivenessProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo + +func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { + if m != nil { + return m.State + } + return LivenessProbeResponse_UNKNOWN +} + +// use for health check +type ReadinessProbeResponse struct { + State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.ReadinessProbeResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } +func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ReadinessProbeResponse) ProtoMessage() {} +func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{1} +} + +func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +} +func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +} +func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +} +func (m *ReadinessProbeResponse) XXX_Size() int { + return xxx_messageInfo_ReadinessProbeResponse.Size(m) +} +func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo + +func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { + if m != nil { + return m.State + } + return ReadinessProbeResponse_UNKNOWN +} + +type GetDocumentRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } +func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*GetDocumentRequest) ProtoMessage() {} +func (*GetDocumentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{2} +} + +func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) +} +func (m *GetDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) +} +func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDocumentRequest.Merge(m, src) +} +func (m *GetDocumentRequest) XXX_Size() int { + return xxx_messageInfo_GetDocumentRequest.Size(m) +} +func (m *GetDocumentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo + +func (m *GetDocumentRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type 
GetDocumentResponse struct { + Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } +func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*GetDocumentResponse) ProtoMessage() {} +func (*GetDocumentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{3} +} + +func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) +} +func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) +} +func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDocumentResponse.Merge(m, src) +} +func (m *GetDocumentResponse) XXX_Size() int { + return xxx_messageInfo_GetDocumentResponse.Size(m) +} +func (m *GetDocumentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo + +func (m *GetDocumentResponse) GetFields() *any.Any { + if m != nil { + return m.Fields + } + return nil +} + +type IndexDocumentRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } +func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentRequest) ProtoMessage() {} +func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{4} +} + 
+func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) +} +func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) +} +func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexDocumentRequest.Merge(m, src) +} +func (m *IndexDocumentRequest) XXX_Size() int { + return xxx_messageInfo_IndexDocumentRequest.Size(m) +} +func (m *IndexDocumentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo + +func (m *IndexDocumentRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *IndexDocumentRequest) GetFields() *any.Any { + if m != nil { + return m.Fields + } + return nil +} + +type IndexDocumentResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } +func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentResponse) ProtoMessage() {} +func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{5} +} + +func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) +} +func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) +} +func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexDocumentResponse.Merge(m, src) +} +func (m *IndexDocumentResponse) XXX_Size() int { + return 
xxx_messageInfo_IndexDocumentResponse.Size(m) +} +func (m *IndexDocumentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexDocumentResponse proto.InternalMessageInfo + +func (m *IndexDocumentResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type DeleteDocumentRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } +func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentRequest) ProtoMessage() {} +func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{6} +} + +func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) +} +func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) +} +func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) +} +func (m *DeleteDocumentRequest) XXX_Size() int { + return xxx_messageInfo_DeleteDocumentRequest.Size(m) +} +func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDocumentRequest proto.InternalMessageInfo + +func (m *DeleteDocumentRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type DeleteDocumentResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteDocumentResponse) 
Reset() { *m = DeleteDocumentResponse{} } +func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentResponse) ProtoMessage() {} +func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{7} +} + +func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) +} +func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) +} +func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) +} +func (m *DeleteDocumentResponse) XXX_Size() int { + return xxx_messageInfo_DeleteDocumentResponse.Size(m) +} +func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteDocumentResponse proto.InternalMessageInfo + +func (m *DeleteDocumentResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type SearchRequest struct { + SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{8} +} + +func (m *SearchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchRequest.Unmarshal(m, b) +} +func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) +} +func (m *SearchRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchRequest.Merge(m, src) +} +func (m *SearchRequest) XXX_Size() int { + return xxx_messageInfo_SearchRequest.Size(m) +} +func (m *SearchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchRequest proto.InternalMessageInfo + +func (m *SearchRequest) GetSearchRequest() *any.Any { + if m != nil { + return m.SearchRequest + } + return nil +} + +type SearchResponse struct { + SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{9} +} + +func (m *SearchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchResponse.Unmarshal(m, b) +} +func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) +} +func (m *SearchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchResponse.Merge(m, src) +} +func (m *SearchResponse) XXX_Size() int { + return xxx_messageInfo_SearchResponse.Size(m) +} +func (m *SearchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchResponse proto.InternalMessageInfo + +func (m *SearchResponse) GetSearchResult() *any.Any { + if m != nil { + return m.SearchResult + } + return nil +} + +func init() { + proto.RegisterEnum("distribute.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) + proto.RegisterEnum("distribute.ReadinessProbeResponse_State", 
ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) + proto.RegisterType((*LivenessProbeResponse)(nil), "distribute.LivenessProbeResponse") + proto.RegisterType((*ReadinessProbeResponse)(nil), "distribute.ReadinessProbeResponse") + proto.RegisterType((*GetDocumentRequest)(nil), "distribute.GetDocumentRequest") + proto.RegisterType((*GetDocumentResponse)(nil), "distribute.GetDocumentResponse") + proto.RegisterType((*IndexDocumentRequest)(nil), "distribute.IndexDocumentRequest") + proto.RegisterType((*IndexDocumentResponse)(nil), "distribute.IndexDocumentResponse") + proto.RegisterType((*DeleteDocumentRequest)(nil), "distribute.DeleteDocumentRequest") + proto.RegisterType((*DeleteDocumentResponse)(nil), "distribute.DeleteDocumentResponse") + proto.RegisterType((*SearchRequest)(nil), "distribute.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "distribute.SearchResponse") +} + +func init() { + proto.RegisterFile("protobuf/distribute/distribute.proto", fileDescriptor_0b1b3e8a99d31c9c) +} + +var fileDescriptor_0b1b3e8a99d31c9c = []byte{ + // 528 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xdd, 0x8f, 0xd2, 0x4c, + 0x14, 0xc6, 0x29, 0xef, 0x0b, 0xca, 0xc1, 0x36, 0x64, 0x04, 0xe2, 0xd6, 0x44, 0xd7, 0xc9, 0x26, + 0x8b, 0xd1, 0x2d, 0x09, 0x5e, 0x19, 0xa3, 0x09, 0x5a, 0x62, 0x36, 0x4b, 0xba, 0x9b, 0xee, 0xfa, + 0x7d, 0xb1, 0x69, 0xe9, 0x59, 0xb6, 0xb1, 0x74, 0xb0, 0x33, 0x35, 0xee, 0xa5, 0x77, 0xfe, 0xc5, + 0x5e, 0x1b, 0xfa, 0x81, 0x1d, 0x28, 0xe0, 0x1d, 0x73, 0xce, 0x79, 0x7e, 0xf3, 0xf4, 0xe4, 0x19, + 0xe0, 0x60, 0x1e, 0x31, 0xc1, 0xdc, 0xf8, 0xaa, 0xef, 0xf9, 0x5c, 0x44, 0xbe, 0x1b, 0x0b, 0x2c, + 0xfc, 0x34, 0x92, 0x36, 0x81, 0xbf, 0x15, 0x7d, 0x6f, 0xca, 0xd8, 0x34, 0xc0, 0xfe, 0x52, 0xe8, + 0x84, 0x37, 0xe9, 0x98, 0x7e, 0x7f, 0xb5, 0x85, 0xb3, 0xb9, 0xc8, 0x9a, 0xf4, 0xa7, 0x02, 0x9d, + 0xb1, 0xff, 0x1d, 0x43, 0xe4, 0xfc, 0x2c, 0x62, 0x2e, 0xda, 0xc8, 0xe7, 0x2c, 0xe4, 
0x48, 0x5e, + 0x42, 0x8d, 0x0b, 0x47, 0xe0, 0x3d, 0x65, 0x5f, 0xe9, 0x69, 0x83, 0x43, 0xa3, 0x70, 0x7f, 0xa9, + 0xc2, 0x38, 0x5f, 0x8c, 0xdb, 0xa9, 0x8a, 0x3e, 0x86, 0x5a, 0x72, 0x26, 0x4d, 0xb8, 0xf5, 0xce, + 0x3a, 0xb1, 0x4e, 0x3f, 0x58, 0xad, 0x0a, 0x69, 0x40, 0x6d, 0x38, 0x3e, 0x7e, 0x3f, 0x6a, 0x29, + 0xe4, 0x36, 0xfc, 0x6f, 0x8e, 0x86, 0x66, 0xab, 0x4a, 0x7f, 0x29, 0xd0, 0xb5, 0xd1, 0xf1, 0xfc, + 0x75, 0x13, 0xaf, 0x64, 0x13, 0xbd, 0xa2, 0x89, 0x72, 0x89, 0xec, 0xc2, 0xd8, 0xe4, 0xc2, 0x1e, + 0x0d, 0xcd, 0x4f, 0x2d, 0x85, 0xa8, 0xd0, 0xb0, 0x4e, 0x2f, 0x2e, 0xd3, 0x63, 0x95, 0x1e, 0x00, + 0x79, 0x8b, 0xc2, 0x64, 0x93, 0x78, 0x86, 0xa1, 0xb0, 0xf1, 0x5b, 0x8c, 0x5c, 0x10, 0x0d, 0xaa, + 0xbe, 0x97, 0x58, 0x68, 0xd8, 0x55, 0xdf, 0xa3, 0x6f, 0xe0, 0xae, 0x34, 0x95, 0x99, 0x7d, 0x0a, + 0xf5, 0x2b, 0x1f, 0x03, 0x8f, 0x27, 0xa3, 0xcd, 0x41, 0xdb, 0x48, 0x37, 0x6f, 0xe4, 0x9b, 0x37, + 0x86, 0xe1, 0x8d, 0x9d, 0xcd, 0xd0, 0x0b, 0x68, 0x1f, 0x87, 0x1e, 0xfe, 0xd8, 0x71, 0x59, 0x81, + 0x5a, 0xfd, 0x07, 0xea, 0x11, 0x74, 0x56, 0xa8, 0x99, 0xb9, 0x36, 0xd4, 0x26, 0x2c, 0x0e, 0x45, + 0x42, 0xae, 0xd9, 0xe9, 0x81, 0x1e, 0x42, 0xc7, 0xc4, 0x00, 0x05, 0xee, 0xfa, 0x64, 0x03, 0xba, + 0xab, 0x83, 0x5b, 0xc1, 0x63, 0x50, 0xcf, 0xd1, 0x89, 0x26, 0xd7, 0x39, 0xf0, 0x05, 0x68, 0x3c, + 0x29, 0x5c, 0x46, 0x69, 0x65, 0xeb, 0x92, 0x54, 0x5e, 0x14, 0xd3, 0x13, 0xd0, 0x72, 0x5a, 0x76, + 0xeb, 0x73, 0x50, 0x97, 0x38, 0x1e, 0x07, 0xdb, 0x69, 0x77, 0x72, 0xda, 0x62, 0x72, 0xf0, 0xfb, + 0x3f, 0x00, 0x73, 0x19, 0x23, 0x32, 0x06, 0x55, 0x8a, 0x33, 0xe9, 0xae, 0x31, 0x46, 0x8b, 0x07, + 0xa3, 0x3f, 0xda, 0xf9, 0x02, 0x68, 0x85, 0x58, 0xa0, 0xc9, 0xb9, 0xdc, 0x88, 0xa3, 0xbb, 0xb3, + 0x4c, 0x2b, 0xe4, 0x0c, 0x9a, 0x85, 0xa8, 0x91, 0x07, 0x45, 0xd1, 0x7a, 0x52, 0xf5, 0x87, 0x1b, + 0xfb, 0x4b, 0xe2, 0x47, 0x50, 0xa5, 0x84, 0x90, 0xfd, 0xa2, 0xa6, 0x2c, 0x92, 0xf2, 0x97, 0x97, + 0xc6, 0x8b, 0x56, 0x7a, 0x0a, 0xf9, 0x02, 0x9a, 0x9c, 0x11, 0x22, 0x09, 0x4b, 0x83, 0x26, 0xaf, + 0xa1, 0x3c, 0x62, 0x09, 
0x7c, 0x08, 0xf5, 0x34, 0x02, 0x64, 0xaf, 0xa8, 0x90, 0x42, 0xa6, 0xeb, + 0x65, 0xad, 0x1c, 0xf2, 0xfa, 0xe8, 0xf3, 0x93, 0xa9, 0x2f, 0xae, 0x63, 0xd7, 0x98, 0xb0, 0x59, + 0x7f, 0xc6, 0x78, 0xfc, 0xd5, 0xe9, 0xbb, 0x81, 0xc3, 0x45, 0xbf, 0xe4, 0xff, 0xd6, 0xad, 0x27, + 0xc5, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xf4, 0x4b, 0x0a, 0x8d, 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DistributeClient is the client API for Distribute service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DistributeClient interface { + LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) + ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) + GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) + IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) + DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) +} + +type distributeClient struct { + cc *grpc.ClientConn +} + +func NewDistributeClient(cc *grpc.ClientConn) DistributeClient { + return &distributeClient{cc} +} + +func (c *distributeClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { + out := new(LivenessProbeResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/LivenessProbe", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *distributeClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { + out := new(ReadinessProbeResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/ReadinessProbe", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *distributeClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { + out := new(GetDocumentResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/GetDocument", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *distributeClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[0], "/distribute.Distribute/IndexDocument", opts...) + if err != nil { + return nil, err + } + x := &distributeIndexDocumentClient{stream} + return x, nil +} + +type Distribute_IndexDocumentClient interface { + Send(*IndexDocumentRequest) error + CloseAndRecv() (*IndexDocumentResponse, error) + grpc.ClientStream +} + +type distributeIndexDocumentClient struct { + grpc.ClientStream +} + +func (x *distributeIndexDocumentClient) Send(m *IndexDocumentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *distributeIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(IndexDocumentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *distributeClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[1], "/distribute.Distribute/DeleteDocument", opts...) 
+ if err != nil { + return nil, err + } + x := &distributeDeleteDocumentClient{stream} + return x, nil +} + +type Distribute_DeleteDocumentClient interface { + Send(*DeleteDocumentRequest) error + CloseAndRecv() (*DeleteDocumentResponse, error) + grpc.ClientStream +} + +type distributeDeleteDocumentClient struct { + grpc.ClientStream +} + +func (x *distributeDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *distributeDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(DeleteDocumentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DistributeServer is the server API for Distribute service. 
+type DistributeServer interface { + LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) + ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) + GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) + IndexDocument(Distribute_IndexDocumentServer) error + DeleteDocument(Distribute_DeleteDocumentServer) error + Search(context.Context, *SearchRequest) (*SearchResponse, error) +} + +func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { + s.RegisterService(&_Distribute_serviceDesc, srv) +} + +func _Distribute_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).LivenessProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/LivenessProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).LivenessProbe(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Distribute_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).ReadinessProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/ReadinessProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).ReadinessProbe(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Distribute_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDocumentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).GetDocument(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/GetDocument", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).GetDocument(ctx, req.(*GetDocumentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Distribute_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DistributeServer).IndexDocument(&distributeIndexDocumentServer{stream}) +} + +type Distribute_IndexDocumentServer interface { + SendAndClose(*IndexDocumentResponse) error + Recv() (*IndexDocumentRequest, error) + grpc.ServerStream +} + +type distributeIndexDocumentServer struct { + grpc.ServerStream +} + +func (x *distributeIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *distributeIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { + m := new(IndexDocumentRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Distribute_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DistributeServer).DeleteDocument(&distributeDeleteDocumentServer{stream}) +} + +type Distribute_DeleteDocumentServer interface { + SendAndClose(*DeleteDocumentResponse) error + Recv() (*DeleteDocumentRequest, error) + grpc.ServerStream +} + +type distributeDeleteDocumentServer struct { + grpc.ServerStream +} + +func (x *distributeDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *distributeDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { + m := new(DeleteDocumentRequest) + if err := x.ServerStream.RecvMsg(m); 
err != nil { + return nil, err + } + return m, nil +} + +func _Distribute_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Distribute_serviceDesc = grpc.ServiceDesc{ + ServiceName: "distribute.Distribute", + HandlerType: (*DistributeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LivenessProbe", + Handler: _Distribute_LivenessProbe_Handler, + }, + { + MethodName: "ReadinessProbe", + Handler: _Distribute_ReadinessProbe_Handler, + }, + { + MethodName: "GetDocument", + Handler: _Distribute_GetDocument_Handler, + }, + { + MethodName: "Search", + Handler: _Distribute_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "IndexDocument", + Handler: _Distribute_IndexDocument_Handler, + ClientStreams: true, + }, + { + StreamName: "DeleteDocument", + Handler: _Distribute_DeleteDocument_Handler, + ClientStreams: true, + }, + }, + Metadata: "protobuf/distribute/distribute.proto", +} diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto new file mode 100644 index 0000000..c2c6f20 --- /dev/null +++ b/protobuf/distribute/distribute.proto @@ -0,0 +1,85 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; + +package distribute; + +option go_package = "github.com/mosuka/blast/protobuf/distribute"; + +service Distribute { + rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} + rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + + rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} + rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} + rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} + rpc Search (SearchRequest) returns (SearchResponse) {} +} + +// use for health check +message LivenessProbeResponse { + enum State { + UNKNOWN = 0; + ALIVE = 1; + DEAD = 2; + } + State state = 1; +} + +// use for health check +message ReadinessProbeResponse { + enum State { + UNKNOWN = 0; + READY = 1; + NOT_READY = 2; + } + State state = 1; +} + +message GetDocumentRequest { + string id = 1; +} + +message GetDocumentResponse { + google.protobuf.Any fields = 1; +} + +message IndexDocumentRequest { + string id = 1; + google.protobuf.Any fields = 2; +} + +message IndexDocumentResponse { + int32 count = 1; +} + +message DeleteDocumentRequest { + string id = 1; +} + +message DeleteDocumentResponse { + int32 count = 1; +} + +message SearchRequest { + google.protobuf.Any search_request = 1; +} + +message SearchResponse { + google.protobuf.Any search_result = 1; +} diff --git a/protobuf/blast.pb.go b/protobuf/index/index.pb.go similarity 
index 54% rename from protobuf/blast.pb.go rename to protobuf/index/index.pb.go index 035376f..e0e0dcf 100644 --- a/protobuf/blast.pb.go +++ b/protobuf/index/index.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/blast.proto +// source: protobuf/index/index.proto -package protobuf +package index import ( context "context" @@ -49,7 +49,7 @@ func (x LivenessProbeResponse_State) String() string { } func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{0, 0} + return fileDescriptor_7b2daf652facb3ae, []int{0, 0} } type ReadinessProbeResponse_State int32 @@ -77,40 +77,12 @@ func (x ReadinessProbeResponse_State) String() string { } func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{1, 0} -} - -type WatchStoreResponse_Command int32 - -const ( - WatchStoreResponse_UNKNOWN WatchStoreResponse_Command = 0 - WatchStoreResponse_SET WatchStoreResponse_Command = 1 - WatchStoreResponse_DELETE WatchStoreResponse_Command = 2 -) - -var WatchStoreResponse_Command_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET", - 2: "DELETE", -} - -var WatchStoreResponse_Command_value = map[string]int32{ - "UNKNOWN": 0, - "SET": 1, - "DELETE": 2, -} - -func (x WatchStoreResponse_Command) String() string { - return proto.EnumName(WatchStoreResponse_Command_name, int32(x)) -} - -func (WatchStoreResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{12, 0} + return fileDescriptor_7b2daf652facb3ae, []int{1, 0} } // use for health check type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.LivenessProbeResponse_State" json:"state,omitempty"` + State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.LivenessProbeResponse_State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} 
`json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -120,7 +92,7 @@ func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } func (*LivenessProbeResponse) ProtoMessage() {} func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{0} + return fileDescriptor_7b2daf652facb3ae, []int{0} } func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { @@ -150,7 +122,7 @@ func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { // use for health check type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=protobuf.ReadinessProbeResponse_State" json:"state,omitempty"` + State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.ReadinessProbeResponse_State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -160,7 +132,7 @@ func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } func (*ReadinessProbeResponse) ProtoMessage() {} func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{1} + return fileDescriptor_7b2daf652facb3ae, []int{1} } func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { @@ -200,7 +172,7 @@ func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } func (*GetNodeRequest) ProtoMessage() {} func (*GetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{2} + return fileDescriptor_7b2daf652facb3ae, []int{2} } func (m *GetNodeRequest) XXX_Unmarshal(b []byte) error { @@ -241,7 +213,7 @@ func (m 
*GetNodeResponse) Reset() { *m = GetNodeResponse{} } func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } func (*GetNodeResponse) ProtoMessage() {} func (*GetNodeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{3} + return fileDescriptor_7b2daf652facb3ae, []int{3} } func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { @@ -289,7 +261,7 @@ func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } func (*SetNodeRequest) ProtoMessage() {} func (*SetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{4} + return fileDescriptor_7b2daf652facb3ae, []int{4} } func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { @@ -336,7 +308,7 @@ func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } func (m *DeleteNodeRequest) String() string { return proto.CompactTextString(m) } func (*DeleteNodeRequest) ProtoMessage() {} func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{5} + return fileDescriptor_7b2daf652facb3ae, []int{5} } func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { @@ -376,7 +348,7 @@ func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } func (*GetClusterResponse) ProtoMessage() {} func (*GetClusterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{6} + return fileDescriptor_7b2daf652facb3ae, []int{6} } func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { @@ -404,264 +376,6 @@ func (m *GetClusterResponse) GetCluster() *any.Any { return nil } -type GetValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*GetValueRequest) Reset() { *m = GetValueRequest{} } -func (m *GetValueRequest) String() string { return proto.CompactTextString(m) } -func (*GetValueRequest) ProtoMessage() {} -func (*GetValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{7} -} - -func (m *GetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetValueRequest.Unmarshal(m, b) -} -func (m *GetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueRequest.Marshal(b, m, deterministic) -} -func (m *GetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueRequest.Merge(m, src) -} -func (m *GetValueRequest) XXX_Size() int { - return xxx_messageInfo_GetValueRequest.Size(m) -} -func (m *GetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetValueRequest proto.InternalMessageInfo - -func (m *GetValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type GetValueResponse struct { - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetValueResponse) Reset() { *m = GetValueResponse{} } -func (m *GetValueResponse) String() string { return proto.CompactTextString(m) } -func (*GetValueResponse) ProtoMessage() {} -func (*GetValueResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{8} -} - -func (m *GetValueResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetValueResponse.Unmarshal(m, b) -} -func (m *GetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueResponse.Marshal(b, m, deterministic) -} -func (m *GetValueResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueResponse.Merge(m, src) -} -func (m *GetValueResponse) 
XXX_Size() int { - return xxx_messageInfo_GetValueResponse.Size(m) -} -func (m *GetValueResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetValueResponse proto.InternalMessageInfo - -func (m *GetValueResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type SetValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetValueRequest) Reset() { *m = SetValueRequest{} } -func (m *SetValueRequest) String() string { return proto.CompactTextString(m) } -func (*SetValueRequest) ProtoMessage() {} -func (*SetValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{9} -} - -func (m *SetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetValueRequest.Unmarshal(m, b) -} -func (m *SetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetValueRequest.Marshal(b, m, deterministic) -} -func (m *SetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetValueRequest.Merge(m, src) -} -func (m *SetValueRequest) XXX_Size() int { - return xxx_messageInfo_SetValueRequest.Size(m) -} -func (m *SetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetValueRequest proto.InternalMessageInfo - -func (m *SetValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *SetValueRequest) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type DeleteValueRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteValueRequest) Reset() { *m = DeleteValueRequest{} } -func (m *DeleteValueRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteValueRequest) ProtoMessage() {} -func (*DeleteValueRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{10} -} - -func (m *DeleteValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteValueRequest.Unmarshal(m, b) -} -func (m *DeleteValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteValueRequest.Marshal(b, m, deterministic) -} -func (m *DeleteValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteValueRequest.Merge(m, src) -} -func (m *DeleteValueRequest) XXX_Size() int { - return xxx_messageInfo_DeleteValueRequest.Size(m) -} -func (m *DeleteValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteValueRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteValueRequest proto.InternalMessageInfo - -func (m *DeleteValueRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchStoreRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchStoreRequest) Reset() { *m = WatchStoreRequest{} } -func (m *WatchStoreRequest) String() string { return proto.CompactTextString(m) } -func (*WatchStoreRequest) ProtoMessage() {} -func (*WatchStoreRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{11} -} - -func (m *WatchStoreRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreRequest.Unmarshal(m, b) -} -func (m *WatchStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreRequest.Marshal(b, m, deterministic) -} -func (m 
*WatchStoreRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreRequest.Merge(m, src) -} -func (m *WatchStoreRequest) XXX_Size() int { - return xxx_messageInfo_WatchStoreRequest.Size(m) -} -func (m *WatchStoreRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchStoreRequest proto.InternalMessageInfo - -func (m *WatchStoreRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchStoreResponse struct { - Command WatchStoreResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=protobuf.WatchStoreResponse_Command" json:"command,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchStoreResponse) Reset() { *m = WatchStoreResponse{} } -func (m *WatchStoreResponse) String() string { return proto.CompactTextString(m) } -func (*WatchStoreResponse) ProtoMessage() {} -func (*WatchStoreResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{12} -} - -func (m *WatchStoreResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreResponse.Unmarshal(m, b) -} -func (m *WatchStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreResponse.Marshal(b, m, deterministic) -} -func (m *WatchStoreResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreResponse.Merge(m, src) -} -func (m *WatchStoreResponse) XXX_Size() int { - return xxx_messageInfo_WatchStoreResponse.Size(m) -} -func (m *WatchStoreResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchStoreResponse proto.InternalMessageInfo - -func (m *WatchStoreResponse) GetCommand() 
WatchStoreResponse_Command { - if m != nil { - return m.Command - } - return WatchStoreResponse_UNKNOWN -} - -func (m *WatchStoreResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *WatchStoreResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - type GetDocumentRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -673,7 +387,7 @@ func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } func (*GetDocumentRequest) ProtoMessage() {} func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{13} + return fileDescriptor_7b2daf652facb3ae, []int{7} } func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -712,7 +426,7 @@ func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } func (*GetDocumentResponse) ProtoMessage() {} func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{14} + return fileDescriptor_7b2daf652facb3ae, []int{8} } func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -752,7 +466,7 @@ func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } func (*IndexDocumentRequest) ProtoMessage() {} func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{15} + return fileDescriptor_7b2daf652facb3ae, []int{9} } func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -798,7 +512,7 @@ func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } func 
(*IndexDocumentResponse) ProtoMessage() {} func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{16} + return fileDescriptor_7b2daf652facb3ae, []int{10} } func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -837,7 +551,7 @@ func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentRequest) ProtoMessage() {} func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{11} } func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -876,7 +590,7 @@ func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentResponse) ProtoMessage() {} func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{18} + return fileDescriptor_7b2daf652facb3ae, []int{12} } func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -915,7 +629,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{19} + return fileDescriptor_7b2daf652facb3ae, []int{13} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -954,7 +668,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{20} + return fileDescriptor_7b2daf652facb3ae, []int{14} } func (m *SearchResponse) 
XXX_Unmarshal(b []byte) error { @@ -993,7 +707,7 @@ func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{21} + return fileDescriptor_7b2daf652facb3ae, []int{15} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -1032,7 +746,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{22} + return fileDescriptor_7b2daf652facb3ae, []int{16} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -1073,7 +787,7 @@ func (m *Document) Reset() { *m = Document{} } func (m *Document) String() string { return proto.CompactTextString(m) } func (*Document) ProtoMessage() {} func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_406ca165ef12c7d5, []int{23} + return fileDescriptor_7b2daf652facb3ae, []int{17} } func (m *Document) XXX_Unmarshal(b []byte) error { @@ -1109,98 +823,80 @@ func (m *Document) GetFields() *any.Any { } func init() { - proto.RegisterEnum("protobuf.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("protobuf.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterEnum("protobuf.WatchStoreResponse_Command", WatchStoreResponse_Command_name, WatchStoreResponse_Command_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "protobuf.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "protobuf.ReadinessProbeResponse") - 
proto.RegisterType((*GetNodeRequest)(nil), "protobuf.GetNodeRequest") - proto.RegisterType((*GetNodeResponse)(nil), "protobuf.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "protobuf.SetNodeRequest") - proto.RegisterType((*DeleteNodeRequest)(nil), "protobuf.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "protobuf.GetClusterResponse") - proto.RegisterType((*GetValueRequest)(nil), "protobuf.GetValueRequest") - proto.RegisterType((*GetValueResponse)(nil), "protobuf.GetValueResponse") - proto.RegisterType((*SetValueRequest)(nil), "protobuf.SetValueRequest") - proto.RegisterType((*DeleteValueRequest)(nil), "protobuf.DeleteValueRequest") - proto.RegisterType((*WatchStoreRequest)(nil), "protobuf.WatchStoreRequest") - proto.RegisterType((*WatchStoreResponse)(nil), "protobuf.WatchStoreResponse") - proto.RegisterType((*GetDocumentRequest)(nil), "protobuf.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "protobuf.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "protobuf.IndexDocumentRequest") - proto.RegisterType((*IndexDocumentResponse)(nil), "protobuf.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "protobuf.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "protobuf.DeleteDocumentResponse") - proto.RegisterType((*SearchRequest)(nil), "protobuf.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "protobuf.SearchResponse") - proto.RegisterType((*GetIndexConfigResponse)(nil), "protobuf.GetIndexConfigResponse") - proto.RegisterType((*GetIndexStatsResponse)(nil), "protobuf.GetIndexStatsResponse") - proto.RegisterType((*Document)(nil), "protobuf.Document") -} - -func init() { proto.RegisterFile("protobuf/blast.proto", fileDescriptor_406ca165ef12c7d5) } - -var fileDescriptor_406ca165ef12c7d5 = []byte{ - // 939 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xeb, 0x6e, 
0xe3, 0x44, - 0x18, 0x8d, 0x5d, 0xd2, 0xa4, 0x27, 0x4d, 0x36, 0x3b, 0xa4, 0xa5, 0xeb, 0x5d, 0xd8, 0x68, 0xf6, - 0xc2, 0x72, 0x73, 0x51, 0x01, 0x21, 0xb4, 0x17, 0xe8, 0x26, 0x56, 0x77, 0x69, 0x94, 0x82, 0x1d, - 0x5a, 0x81, 0x84, 0x2a, 0x27, 0x99, 0xb6, 0xd6, 0x26, 0x76, 0x89, 0xc7, 0x2b, 0xfa, 0x8b, 0x7f, - 0xbc, 0x09, 0xcf, 0xc1, 0xab, 0xad, 0x7c, 0xbf, 0xc4, 0xb1, 0x23, 0xed, 0xbf, 0xcc, 0xcc, 0xf9, - 0xce, 0x77, 0x99, 0xc9, 0x39, 0x32, 0x3a, 0xd7, 0x0b, 0x8b, 0x5b, 0x63, 0xe7, 0x62, 0x7f, 0x3c, - 0xd3, 0x6d, 0x2e, 0x7b, 0x4b, 0x52, 0x0f, 0x77, 0xa5, 0x3b, 0x97, 0x96, 0x75, 0x39, 0x63, 0xfb, - 0x11, 0x4c, 0x37, 0x6f, 0x7c, 0x90, 0x74, 0x37, 0x7b, 0xc4, 0xe6, 0xd7, 0x3c, 0x38, 0xa4, 0xff, - 0x60, 0x67, 0x60, 0xbc, 0x65, 0x26, 0xb3, 0xed, 0x5f, 0x16, 0xd6, 0x98, 0xa9, 0xcc, 0xbe, 0xb6, - 0x4c, 0x9b, 0x91, 0xa7, 0xa8, 0xda, 0x5c, 0xe7, 0x6c, 0x4f, 0xe8, 0x0a, 0x4f, 0x5a, 0x07, 0x8f, - 0xe4, 0x30, 0x5c, 0xce, 0xc5, 0xcb, 0x9a, 0x0b, 0x56, 0xfd, 0x18, 0xfa, 0x19, 0xaa, 0xde, 0x9a, - 0x34, 0x50, 0xfb, 0x6d, 0x78, 0x3c, 0x3c, 0x39, 0x1b, 0xb6, 0x2b, 0x64, 0x0b, 0xd5, 0xc3, 0xc1, - 0xeb, 0x53, 0xa5, 0x2d, 0x90, 0x3a, 0x3e, 0xe8, 0x2b, 0x87, 0xfd, 0xb6, 0x48, 0xff, 0x15, 0xb0, - 0xab, 0x32, 0x7d, 0x6a, 0x2c, 0x97, 0xf0, 0x2c, 0x5d, 0xc2, 0xe3, 0xb8, 0x84, 0xfc, 0x80, 0x74, - 0x0d, 0xf2, 0xaa, 0x1a, 0x54, 0xe5, 0xb0, 0xff, 0x7b, 0x5b, 0x20, 0x4d, 0x6c, 0x0d, 0x4f, 0x46, - 0xe7, 0xfe, 0x52, 0xa4, 0x5d, 0xb4, 0x8e, 0x18, 0x1f, 0x5a, 0x53, 0xa6, 0xb2, 0xbf, 0x1c, 0x66, - 0x73, 0xd2, 0x82, 0x68, 0x4c, 0xbd, 0xe4, 0x5b, 0xaa, 0x68, 0x4c, 0xe9, 0x9f, 0xb8, 0x15, 0x21, - 0x82, 0x12, 0xbf, 0x05, 0x4c, 0x6b, 0xca, 0x7a, 0x96, 0x79, 0x61, 0x5c, 0x7a, 0xd0, 0xc6, 0x41, - 0x47, 0xf6, 0x07, 0x1e, 0x97, 0x7b, 0x68, 0xde, 0xa8, 0x09, 0x1c, 0xe9, 0x84, 0x8d, 0x89, 0x1e, - 0x77, 0x50, 0xf0, 0x29, 0x5a, 0x5a, 0x61, 0x01, 0x99, 0x6c, 0xe2, 0x7a, 0xd9, 0xe8, 0x03, 0xdc, - 0xee, 0xb3, 0x19, 0xe3, 0xac, 0xa8, 0xb7, 0x3e, 0xc8, 0x11, 0xe3, 0xbd, 0x99, 0x63, 0x73, 0xb6, - 0x88, 0xda, 0x93, 0x51, 
0x9b, 0xf8, 0x5b, 0x85, 0xbd, 0x85, 0x20, 0xfa, 0xc0, 0x9b, 0xd0, 0xa9, - 0x3e, 0x73, 0xa2, 0x44, 0x6d, 0x6c, 0xbc, 0x61, 0x37, 0x41, 0x26, 0xf7, 0x27, 0x7d, 0x81, 0x76, - 0x0c, 0x0a, 0x12, 0x7d, 0x8e, 0xea, 0x5b, 0x77, 0xa3, 0x30, 0x8d, 0x0f, 0xa1, 0x27, 0xb8, 0xa5, - 0x95, 0x25, 0x89, 0x09, 0xc5, 0x72, 0xc2, 0xc7, 0x20, 0xfe, 0x80, 0x4a, 0x0a, 0x7f, 0x84, 0xdb, - 0x67, 0x3a, 0x9f, 0x5c, 0x69, 0xdc, 0x5a, 0x14, 0xc0, 0xfe, 0x17, 0x40, 0x92, 0xb8, 0xa0, 0xc5, - 0x17, 0xa8, 0x4d, 0xac, 0xf9, 0x5c, 0x37, 0xa7, 0xc1, 0x7b, 0x7e, 0x18, 0x17, 0xb3, 0x0c, 0x97, - 0x7b, 0x3e, 0x56, 0x0d, 0x83, 0xc2, 0x44, 0x62, 0x4e, 0x8f, 0x1b, 0xe5, 0x3d, 0x7e, 0x81, 0x5a, - 0xc0, 0x98, 0xfe, 0x3f, 0xd4, 0xb0, 0xa1, 0x29, 0xa3, 0xb6, 0x40, 0x80, 0xcd, 0xbe, 0x32, 0x50, - 0x46, 0x4a, 0x5b, 0xa4, 0x0f, 0xbd, 0xc7, 0xd0, 0xb7, 0x26, 0xce, 0x9c, 0x99, 0x7c, 0xd5, 0x93, - 0xe9, 0xe1, 0xc3, 0x14, 0x2a, 0xe8, 0xf3, 0x4b, 0x6c, 0x5e, 0x18, 0x6c, 0x36, 0xb5, 0x0b, 0xef, - 0x32, 0xc0, 0xd0, 0x11, 0x3a, 0xaf, 0xcd, 0x29, 0xfb, 0xbb, 0x24, 0x59, 0x82, 0x55, 0x5c, 0x83, - 0xf5, 0x2b, 0xec, 0x64, 0x58, 0x83, 0xe2, 0x3a, 0xa8, 0x4e, 0x2c, 0xc7, 0xe4, 0x1e, 0x73, 0x55, - 0xf5, 0x17, 0xf4, 0x53, 0xec, 0xf8, 0x0f, 0xa0, 0xac, 0x65, 0x19, 0xbb, 0x59, 0x60, 0x21, 0xf1, - 0x00, 0x4d, 0x8d, 0xe9, 0x8b, 0xc9, 0x55, 0x48, 0xf8, 0x14, 0x2d, 0xdb, 0xdb, 0x38, 0x5f, 0xf8, - 0x3b, 0x85, 0x43, 0x6a, 0xda, 0xc9, 0x60, 0x7a, 0xec, 0x0a, 0x84, 0xbf, 0x11, 0x64, 0xfd, 0x01, - 0xcd, 0x88, 0xce, 0x76, 0x66, 0xc5, 0x6c, 0xdb, 0x21, 0x9b, 0x8b, 0xa4, 0xbf, 0x62, 0xf7, 0x88, - 0x71, 0x6f, 0x4a, 0xbe, 0x4e, 0x44, 0xa4, 0xdf, 0x63, 0xdb, 0x70, 0xb7, 0xcf, 0x27, 0xe5, 0xaa, - 0xd6, 0x30, 0x62, 0x02, 0x3a, 0xc4, 0x4e, 0x48, 0xe9, 0x2a, 0xaf, 0x1d, 0x31, 0x7e, 0x07, 0x1f, - 0x77, 0xee, 0x0a, 0x5d, 0xf1, 0xbb, 0x80, 0x11, 0x85, 0xd3, 0x57, 0xa8, 0x87, 0x73, 0x7e, 0xbf, - 0xf7, 0x70, 0xf0, 0x1f, 0x50, 0x7d, 0xe9, 0xfa, 0x26, 0xf9, 0x19, 0xcd, 0x94, 0x7f, 0x91, 0xdd, - 0xa5, 0x40, 0xc5, 0xb5, 0x47, 0xe9, 0x7e, 0x89, 0xe1, 0xd1, 
0x0a, 0x19, 0xa0, 0x95, 0x36, 0xa2, - 0x95, 0x64, 0xdd, 0x32, 0xeb, 0xa2, 0x15, 0xf2, 0x13, 0x6a, 0x81, 0xbb, 0x90, 0xbd, 0x18, 0x9e, - 0xb6, 0x24, 0xe9, 0x4e, 0xce, 0x49, 0xc4, 0xf0, 0x1c, 0x35, 0x6d, 0x99, 0x21, 0xed, 0x29, 0xd2, - 0x8a, 0x12, 0x69, 0x85, 0xf4, 0x80, 0xd8, 0x27, 0xc8, 0xdd, 0x18, 0xb0, 0xe4, 0x1e, 0x05, 0x24, - 0x7d, 0x20, 0xf6, 0x91, 0x95, 0xf3, 0xb8, 0x97, 0x6a, 0x23, 0xe3, 0x3a, 0xb4, 0x42, 0x5e, 0x61, - 0xdb, 0x93, 0xc4, 0xf7, 0xe4, 0xf9, 0x5a, 0x20, 0xcf, 0x50, 0xd7, 0x4c, 0xfd, 0xda, 0xbe, 0xb2, - 0xf8, 0x4a, 0x96, 0xa2, 0x91, 0xd4, 0x43, 0xab, 0x22, 0xe9, 0xd1, 0x27, 0xad, 0x42, 0x92, 0xf2, - 0x8e, 0xa2, 0x66, 0x7e, 0x44, 0x5d, 0xcb, 0x21, 0xc9, 0x78, 0x58, 0x41, 0x15, 0x0a, 0x1a, 0x09, - 0x7f, 0x22, 0xf7, 0xb2, 0x37, 0xb3, 0x26, 0xcd, 0x31, 0x10, 0xfb, 0x4c, 0xf2, 0x7e, 0x97, 0x4c, - 0x2d, 0x39, 0xd7, 0x65, 0x6b, 0xf2, 0xe6, 0x3a, 0x40, 0x23, 0x21, 0xfe, 0x24, 0x7d, 0x11, 0x19, - 0x19, 0x95, 0x3e, 0x5e, 0x71, 0x1a, 0x8d, 0x68, 0x84, 0x66, 0x4a, 0xaf, 0xc9, 0x27, 0x71, 0x44, - 0x9e, 0x3d, 0x24, 0xff, 0x9d, 0xb9, 0x42, 0x4f, 0x2b, 0x4f, 0x04, 0x72, 0x86, 0x56, 0x5a, 0xad, - 0xc9, 0xfd, 0xec, 0xe8, 0xb2, 0xbc, 0xdd, 0xd5, 0x80, 0x04, 0xf1, 0x73, 0x6c, 0xfa, 0x42, 0x4c, - 0x3e, 0x4a, 0xde, 0x67, 0x42, 0xab, 0xa5, 0xbd, 0xe5, 0x83, 0xa4, 0x6e, 0xa4, 0xa5, 0x77, 0x1d, - 0xdd, 0xc8, 0x17, 0x6b, 0x5a, 0x71, 0x15, 0x2d, 0xa5, 0xba, 0xeb, 0x28, 0x5a, 0xae, 0x4c, 0xd3, - 0xca, 0x4b, 0xfa, 0x47, 0xf7, 0xd2, 0xe0, 0x57, 0xce, 0x58, 0x9e, 0x58, 0xf3, 0xfd, 0xb9, 0x65, - 0x3b, 0x6f, 0x74, 0xff, 0x83, 0x23, 0xfa, 0x7a, 0x18, 0x6f, 0x7a, 0xbf, 0xbe, 0x79, 0x17, 0x00, - 0x00, 0xff, 0xff, 0xd0, 0x9c, 0x8b, 0x11, 0x92, 0x0c, 0x00, 0x00, + proto.RegisterEnum("index.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) + proto.RegisterEnum("index.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) + proto.RegisterType((*LivenessProbeResponse)(nil), 
"index.LivenessProbeResponse") + proto.RegisterType((*ReadinessProbeResponse)(nil), "index.ReadinessProbeResponse") + proto.RegisterType((*GetNodeRequest)(nil), "index.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "index.GetNodeResponse") + proto.RegisterType((*SetNodeRequest)(nil), "index.SetNodeRequest") + proto.RegisterType((*DeleteNodeRequest)(nil), "index.DeleteNodeRequest") + proto.RegisterType((*GetClusterResponse)(nil), "index.GetClusterResponse") + proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") + proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") + proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") + proto.RegisterType((*IndexDocumentResponse)(nil), "index.IndexDocumentResponse") + proto.RegisterType((*DeleteDocumentRequest)(nil), "index.DeleteDocumentRequest") + proto.RegisterType((*DeleteDocumentResponse)(nil), "index.DeleteDocumentResponse") + proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") + proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") + proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") + proto.RegisterType((*Document)(nil), "index.Document") +} + +func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } + +var fileDescriptor_7b2daf652facb3ae = []byte{ + // 755 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x6f, 0xd2, 0x50, + 0x14, 0x2d, 0x44, 0xc6, 0x76, 0x19, 0x88, 0x4f, 0x20, 0x5b, 0xb7, 0x25, 0xcb, 0x9b, 0xd1, 0x99, + 0x68, 0x31, 0x53, 0x33, 0x7f, 0x7d, 0x10, 0x07, 0xe2, 0xb2, 0x85, 0xb9, 0x32, 0xb7, 0x68, 0x62, + 0x48, 0xa1, 0x6f, 0xd0, 0x08, 0x7d, 0xc8, 0x7b, 0x35, 0x2e, 0xf1, 0x9b, 0x7f, 0xa3, 0xff, 0x8f, + 0x69, 0x5f, 0x5b, 0xda, 0xae, 0x2d, 0x4b, 0xf6, 0x85, 0xe4, 0xdd, 0x77, 0xee, 
0x39, 0xf7, 0x5d, + 0xee, 0x3d, 0x29, 0xc8, 0xd3, 0x19, 0xe5, 0xb4, 0x6f, 0x5d, 0xd6, 0x0d, 0x53, 0x27, 0xbf, 0xc5, + 0xaf, 0xe2, 0x04, 0x51, 0xce, 0x39, 0xc8, 0xeb, 0x43, 0x4a, 0x87, 0x63, 0x52, 0xf7, 0x91, 0x9a, + 0x79, 0x25, 0x10, 0xf2, 0x46, 0xf4, 0x8a, 0x4c, 0xa6, 0xdc, 0xbd, 0xc4, 0x7f, 0xa0, 0x7a, 0x6c, + 0xfc, 0x22, 0x26, 0x61, 0xec, 0xf3, 0x8c, 0xf6, 0x89, 0x4a, 0xd8, 0x94, 0x9a, 0x8c, 0xa0, 0x57, + 0x90, 0x63, 0x5c, 0xe3, 0x64, 0x2d, 0xb3, 0x9d, 0xd9, 0x2d, 0xed, 0x61, 0x45, 0x88, 0xc6, 0x82, + 0x95, 0xae, 0x8d, 0x54, 0x45, 0x02, 0x7e, 0x0c, 0x39, 0xe7, 0x8c, 0x0a, 0x90, 0xff, 0xd2, 0x39, + 0xea, 0x9c, 0x5c, 0x74, 0xca, 0x12, 0x5a, 0x81, 0x5c, 0xe3, 0xf8, 0xf0, 0xbc, 0x55, 0xce, 0xa0, + 0x65, 0xb8, 0xd3, 0x6c, 0x35, 0x9a, 0xe5, 0x2c, 0xfe, 0x9b, 0x81, 0x9a, 0x4a, 0x34, 0xdd, 0xb8, + 0xae, 0xff, 0x3a, 0xac, 0xbf, 0xe3, 0xea, 0xc7, 0xa3, 0xc3, 0x05, 0x28, 0x49, 0x05, 0xa8, 0xad, + 0x46, 0xf3, 0x6b, 0x39, 0x83, 0x8a, 0xb0, 0xd2, 0x39, 0x39, 0xeb, 0x89, 0x63, 0x16, 0x6f, 0x43, + 0xa9, 0x4d, 0x78, 0x87, 0xea, 0x44, 0x25, 0x3f, 0x2d, 0xc2, 0x38, 0x2a, 0x41, 0xd6, 0xd0, 0x1d, + 0xe5, 0x15, 0x35, 0x6b, 0xe8, 0xf8, 0x3b, 0xdc, 0xf5, 0x11, 0x6e, 0x7d, 0x2f, 0x00, 0x4c, 0xaa, + 0x93, 0x03, 0x6a, 0x5e, 0x1a, 0x43, 0x07, 0x5a, 0xd8, 0xab, 0x28, 0xa2, 0xd5, 0x8a, 0xd7, 0x6a, + 0xa5, 0x61, 0x5e, 0xa9, 0x01, 0x1c, 0xaa, 0x78, 0xaf, 0xca, 0x3a, 0xdc, 0x6e, 0xc1, 0xe7, 0x50, + 0xea, 0xa6, 0x16, 0x10, 0x51, 0xcb, 0xde, 0x4c, 0x0d, 0xef, 0xc0, 0xbd, 0x26, 0x19, 0x13, 0x4e, + 0xd2, 0xde, 0xd6, 0x04, 0xd4, 0x26, 0xfc, 0x60, 0x6c, 0x31, 0x4e, 0x66, 0xfe, 0xf3, 0x14, 0xc8, + 0x0f, 0x44, 0x28, 0xf5, 0x6d, 0x1e, 0x08, 0x3f, 0x70, 0x58, 0x9a, 0x74, 0x60, 0x4d, 0x88, 0xc9, + 0x93, 0xb4, 0x0e, 0xe0, 0x7e, 0x08, 0xe5, 0x8a, 0x3d, 0x81, 0xa5, 0x4b, 0x83, 0x8c, 0x75, 0x96, + 0xaa, 0xe5, 0x62, 0xf0, 0x19, 0x54, 0x0e, 0xed, 0x59, 0x58, 0x20, 0x16, 0x60, 0xcd, 0xde, 0x80, + 0xf5, 0x29, 0x54, 0x23, 0xac, 0x6e, 0x71, 0x15, 0xc8, 0x0d, 0xa8, 0x65, 0x72, 0x87, 0x39, 0xa7, + 0x8a, 0x03, 0x7e, 
0x04, 0x55, 0xd1, 0xda, 0x45, 0x4f, 0x56, 0xa0, 0x16, 0x05, 0xa6, 0x12, 0x1f, + 0x43, 0xb1, 0x4b, 0xb4, 0xd9, 0x60, 0xe4, 0x11, 0xbe, 0x85, 0x12, 0x73, 0x02, 0xbd, 0x99, 0x88, + 0xa4, 0x36, 0xa9, 0xc8, 0x82, 0xc9, 0xf8, 0xc8, 0x9e, 0x2c, 0x11, 0xf0, 0xf7, 0xaa, 0xe8, 0xd3, + 0x31, 0x6b, 0x9c, 0xce, 0xb6, 0xea, 0xb1, 0xd9, 0x48, 0x7c, 0x0a, 0xb5, 0x36, 0xe1, 0x4e, 0x97, + 0xc4, 0x80, 0xf9, 0xa4, 0xfb, 0xb0, 0xea, 0xac, 0x67, 0x6f, 0xb0, 0x78, 0x1d, 0x0a, 0xc6, 0x9c, + 0x00, 0x77, 0xa0, 0xea, 0x51, 0xda, 0x2b, 0xcb, 0x7c, 0xc6, 0x97, 0x20, 0x70, 0x3d, 0x7b, 0x43, + 0xd2, 0xe7, 0x02, 0x0c, 0x3f, 0x1d, 0x7f, 0x82, 0x65, 0xaf, 0xcf, 0xb7, 0x9b, 0x87, 0xbd, 0x7f, + 0x79, 0xc8, 0x39, 0x75, 0xa1, 0x36, 0x14, 0x43, 0xae, 0x87, 0x6a, 0xd7, 0x12, 0x5b, 0xb6, 0xa3, + 0xca, 0x9b, 0x69, 0x1e, 0x89, 0x25, 0x74, 0x08, 0xa5, 0xb0, 0x7d, 0x25, 0x32, 0x6d, 0xa5, 0xba, + 0x1d, 0x96, 0xd0, 0x1b, 0xc8, 0xbb, 0x86, 0x84, 0xaa, 0x2e, 0x36, 0x6c, 0x61, 0x72, 0x2d, 0x1a, + 0x0e, 0xe6, 0x76, 0x23, 0xb9, 0xdd, 0x68, 0x6e, 0x6c, 0x59, 0x58, 0x42, 0xef, 0x01, 0xe6, 0x8e, + 0x82, 0xd6, 0xdc, 0xf4, 0x6b, 0x26, 0x93, 0xc2, 0xd0, 0x00, 0x98, 0xdb, 0x4d, 0x62, 0x03, 0xd6, + 0xe7, 0xd5, 0x47, 0x9c, 0x09, 0x4b, 0xa8, 0x05, 0xab, 0x17, 0x1a, 0x1f, 0x8c, 0x6e, 0x43, 0xf2, + 0x2c, 0x83, 0x3e, 0x42, 0x21, 0x60, 0x46, 0x28, 0x80, 0x8e, 0xec, 0xb4, 0x2c, 0xc7, 0x5d, 0xf9, + 0xe5, 0x74, 0xa0, 0x18, 0x72, 0x0e, 0xb4, 0xe1, 0xc2, 0xe3, 0x5c, 0xca, 0x1f, 0x92, 0x58, 0xb3, + 0xc1, 0xd2, 0x6e, 0x06, 0x9d, 0x42, 0x29, 0xec, 0x18, 0x68, 0x33, 0xd4, 0xe7, 0x28, 0xe3, 0x56, + 0xc2, 0x6d, 0x80, 0x72, 0x1f, 0x96, 0x84, 0x0d, 0xa0, 0x8a, 0xff, 0x8f, 0x07, 0x6c, 0x42, 0xae, + 0x46, 0xa2, 0xc1, 0x91, 0x0d, 0xaf, 0xfc, 0xc2, 0x91, 0x8d, 0x77, 0x08, 0x2c, 0xd9, 0x6b, 0x14, + 0x5a, 0xf5, 0x85, 0x6b, 0x14, 0x6b, 0x0c, 0x58, 0x42, 0xef, 0x60, 0xb9, 0x6b, 0x6a, 0x53, 0x36, + 0xa2, 0x3c, 0x91, 0x23, 0x71, 0xfe, 0x3e, 0xec, 0x7e, 0x7b, 0x38, 0x34, 0xf8, 0xc8, 0xea, 0x2b, + 0x03, 0x3a, 0xa9, 0x4f, 0x28, 0xb3, 0x7e, 0x68, 0xf5, 
0xfe, 0x58, 0x63, 0xbc, 0x1e, 0xfe, 0xca, + 0xea, 0x2f, 0x39, 0xe7, 0xe7, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x23, 0xe9, 0x66, 0x7e, + 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1211,98 +907,94 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// BlastClient is the client API for Blast service. +// IndexClient is the client API for Index service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BlastClient interface { +type IndexClient interface { LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) - WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Blast_WatchClusterClient, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) - GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) - SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Blast_WatchStoreClient, error) + WatchCluster(ctx context.Context, in 
*empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) - IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_IndexDocumentClient, error) - DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_DeleteDocumentClient, error) + IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) + DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) + Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) } -type blastClient struct { +type indexClient struct { cc *grpc.ClientConn } -func NewBlastClient(cc *grpc.ClientConn) BlastClient { - return &blastClient{cc} +func NewIndexClient(cc *grpc.ClientConn) IndexClient { + return &indexClient{cc} } -func (c *blastClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { +func (c *indexClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/LivenessProbe", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/LivenessProbe", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *blastClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { +func (c *indexClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/ReadinessProbe", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/ReadinessProbe", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { +func (c *indexClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/GetNode", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/SetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/SetNode", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/DeleteNode", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *blastClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { +func (c *indexClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetCluster", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/GetCluster", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Blast_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[0], "/protobuf.Blast/WatchCluster", opts...) +func (c *indexClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/WatchCluster", opts...) if err != nil { return nil, err } - x := &blastWatchClusterClient{stream} + x := &indexWatchClusterClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -1312,16 +1004,16 @@ func (c *blastClient) WatchCluster(ctx context.Context, in *empty.Empty, opts .. 
return x, nil } -type Blast_WatchClusterClient interface { +type Index_WatchClusterClient interface { Recv() (*GetClusterResponse, error) grpc.ClientStream } -type blastWatchClusterClient struct { +type indexWatchClusterClient struct { grpc.ClientStream } -func (x *blastWatchClusterClient) Recv() (*GetClusterResponse, error) { +func (x *indexWatchClusterClient) Recv() (*GetClusterResponse, error) { m := new(GetClusterResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err @@ -1329,107 +1021,39 @@ func (x *blastWatchClusterClient) Recv() (*GetClusterResponse, error) { return m, nil } -func (c *blastClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) { - out := new(GetValueResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetValue", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/SetValue", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/protobuf.Blast/DeleteValue", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *blastClient) WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Blast_WatchStoreClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[1], "/protobuf.Blast/WatchStore", opts...) - if err != nil { - return nil, err - } - x := &blastWatchStoreClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Blast_WatchStoreClient interface { - Recv() (*WatchStoreResponse, error) - grpc.ClientStream -} - -type blastWatchStoreClient struct { - grpc.ClientStream -} - -func (x *blastWatchStoreClient) Recv() (*WatchStoreResponse, error) { - m := new(WatchStoreResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *blastClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { +func (c *indexClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetDocument", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/GetDocument", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[2], "/protobuf.Blast/IndexDocument", opts...) +func (c *indexClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[1], "/index.Index/IndexDocument", opts...) 
if err != nil { return nil, err } - x := &blastIndexDocumentClient{stream} + x := &indexIndexDocumentClient{stream} return x, nil } -type Blast_IndexDocumentClient interface { +type Index_IndexDocumentClient interface { Send(*IndexDocumentRequest) error CloseAndRecv() (*IndexDocumentResponse, error) grpc.ClientStream } -type blastIndexDocumentClient struct { +type indexIndexDocumentClient struct { grpc.ClientStream } -func (x *blastIndexDocumentClient) Send(m *IndexDocumentRequest) error { +func (x *indexIndexDocumentClient) Send(m *IndexDocumentRequest) error { return x.ClientStream.SendMsg(m) } -func (x *blastIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { +func (x *indexIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } @@ -1440,30 +1064,30 @@ func (x *blastIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error return m, nil } -func (c *blastClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Blast_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Blast_serviceDesc.Streams[3], "/protobuf.Blast/DeleteDocument", opts...) +func (c *indexClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[2], "/index.Index/DeleteDocument", opts...) 
if err != nil { return nil, err } - x := &blastDeleteDocumentClient{stream} + x := &indexDeleteDocumentClient{stream} return x, nil } -type Blast_DeleteDocumentClient interface { +type Index_DeleteDocumentClient interface { Send(*DeleteDocumentRequest) error CloseAndRecv() (*DeleteDocumentResponse, error) grpc.ClientStream } -type blastDeleteDocumentClient struct { +type indexDeleteDocumentClient struct { grpc.ClientStream } -func (x *blastDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { +func (x *indexDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { return x.ClientStream.SendMsg(m) } -func (x *blastDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { +func (x *indexDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } @@ -1474,318 +1098,230 @@ func (x *blastDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, err return m, nil } -func (c *blastClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { +func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/Search", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *blastClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { +func (c *indexClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { out := new(GetIndexConfigResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexConfig", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/GetIndexConfig", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *blastClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { +func (c *indexClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { out := new(GetIndexStatsResponse) - err := c.cc.Invoke(ctx, "/protobuf.Blast/GetIndexStats", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/GetIndexStats", in, out, opts...) if err != nil { return nil, err } return out, nil } -// BlastServer is the server API for Blast service. -type BlastServer interface { +func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IndexServer is the server API for Index service. +type IndexServer interface { LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) - WatchCluster(*empty.Empty, Blast_WatchClusterServer) error - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) - GetValue(context.Context, *GetValueRequest) (*GetValueResponse, error) - SetValue(context.Context, *SetValueRequest) (*empty.Empty, error) - DeleteValue(context.Context, *DeleteValueRequest) (*empty.Empty, error) - WatchStore(*WatchStoreRequest, Blast_WatchStoreServer) error + WatchCluster(*empty.Empty, Index_WatchClusterServer) error GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - 
IndexDocument(Blast_IndexDocumentServer) error - DeleteDocument(Blast_DeleteDocumentServer) error + IndexDocument(Index_IndexDocumentServer) error + DeleteDocument(Index_DeleteDocumentServer) error Search(context.Context, *SearchRequest) (*SearchResponse, error) GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) GetIndexStats(context.Context, *empty.Empty) (*GetIndexStatsResponse, error) + Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } -func RegisterBlastServer(s *grpc.Server, srv BlastServer) { - s.RegisterService(&_Blast_serviceDesc, srv) +func RegisterIndexServer(s *grpc.Server, srv IndexServer) { + s.RegisterService(&_Index_serviceDesc, srv) } -func _Blast_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).LivenessProbe(ctx, in) + return srv.(IndexServer).LivenessProbe(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/LivenessProbe", + FullMethod: "/index.Index/LivenessProbe", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).LivenessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).LivenessProbe(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Blast_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := 
dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).ReadinessProbe(ctx, in) + return srv.(IndexServer).ReadinessProbe(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/ReadinessProbe", + FullMethod: "/index.Index/ReadinessProbe", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).ReadinessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).ReadinessProbe(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Blast_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetNodeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).GetNode(ctx, in) + return srv.(IndexServer).GetNode(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/GetNode", + FullMethod: "/index.Index/GetNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetNode(ctx, req.(*GetNodeRequest)) + return srv.(IndexServer).GetNode(ctx, req.(*GetNodeRequest)) } return interceptor(ctx, in, info, handler) } -func _Blast_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SetNodeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).SetNode(ctx, in) + return srv.(IndexServer).SetNode(ctx, in) } info := &grpc.UnaryServerInfo{ Server: 
srv, - FullMethod: "/protobuf.Blast/SetNode", + FullMethod: "/index.Index/SetNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).SetNode(ctx, req.(*SetNodeRequest)) + return srv.(IndexServer).SetNode(ctx, req.(*SetNodeRequest)) } return interceptor(ctx, in, info, handler) } -func _Blast_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteNodeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).DeleteNode(ctx, in) + return srv.(IndexServer).DeleteNode(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/DeleteNode", + FullMethod: "/index.Index/DeleteNode", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) + return srv.(IndexServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) } return interceptor(ctx, in, info, handler) } -func _Blast_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).GetCluster(ctx, in) + return srv.(IndexServer).GetCluster(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/GetCluster", + FullMethod: "/index.Index/GetCluster", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(BlastServer).GetCluster(ctx, req.(*empty.Empty)) + return srv.(IndexServer).GetCluster(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Blast_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Index_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(BlastServer).WatchCluster(m, &blastWatchClusterServer{stream}) + return srv.(IndexServer).WatchCluster(m, &indexWatchClusterServer{stream}) } -type Blast_WatchClusterServer interface { +type Index_WatchClusterServer interface { Send(*GetClusterResponse) error grpc.ServerStream } -type blastWatchClusterServer struct { - grpc.ServerStream -} - -func (x *blastWatchClusterServer) Send(m *GetClusterResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Blast_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_GetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).GetValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/GetValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(BlastServer).GetValue(ctx, req.(*GetValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_SetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).SetValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/SetValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).SetValue(ctx, req.(*SetValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_DeleteValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteValueRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BlastServer).DeleteValue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/protobuf.Blast/DeleteValue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).DeleteValue(ctx, req.(*DeleteValueRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Blast_WatchStore_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WatchStoreRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BlastServer).WatchStore(m, &blastWatchStoreServer{stream}) -} - -type Blast_WatchStoreServer interface { - Send(*WatchStoreResponse) error - grpc.ServerStream -} - -type blastWatchStoreServer struct { +type indexWatchClusterServer struct { grpc.ServerStream } -func (x *blastWatchStoreServer) Send(m *WatchStoreResponse) error { +func (x *indexWatchClusterServer) Send(m *GetClusterResponse) error { return x.ServerStream.SendMsg(m) } -func 
_Blast_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetDocumentRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).GetDocument(ctx, in) + return srv.(IndexServer).GetDocument(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/GetDocument", + FullMethod: "/index.Index/GetDocument", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetDocument(ctx, req.(*GetDocumentRequest)) + return srv.(IndexServer).GetDocument(ctx, req.(*GetDocumentRequest)) } return interceptor(ctx, in, info, handler) } -func _Blast_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BlastServer).IndexDocument(&blastIndexDocumentServer{stream}) +func _Index_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IndexServer).IndexDocument(&indexIndexDocumentServer{stream}) } -type Blast_IndexDocumentServer interface { +type Index_IndexDocumentServer interface { SendAndClose(*IndexDocumentResponse) error Recv() (*IndexDocumentRequest, error) grpc.ServerStream } -type blastIndexDocumentServer struct { +type indexIndexDocumentServer struct { grpc.ServerStream } -func (x *blastIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { +func (x *indexIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { return x.ServerStream.SendMsg(m) } -func (x *blastIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { +func (x *indexIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { m := new(IndexDocumentRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err @@ -1793,25 
+1329,25 @@ func (x *blastIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { return m, nil } -func _Blast_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BlastServer).DeleteDocument(&blastDeleteDocumentServer{stream}) +func _Index_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IndexServer).DeleteDocument(&indexDeleteDocumentServer{stream}) } -type Blast_DeleteDocumentServer interface { +type Index_DeleteDocumentServer interface { SendAndClose(*DeleteDocumentResponse) error Recv() (*DeleteDocumentRequest, error) grpc.ServerStream } -type blastDeleteDocumentServer struct { +type indexDeleteDocumentServer struct { grpc.ServerStream } -func (x *blastDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { +func (x *indexDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { return x.ServerStream.SendMsg(m) } -func (x *blastDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { +func (x *indexDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { m := new(DeleteDocumentRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err @@ -1819,142 +1355,143 @@ func (x *blastDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { return m, nil } -func _Blast_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SearchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).Search(ctx, in) + return srv.(IndexServer).Search(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/Search", + FullMethod: "/index.Index/Search", } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(BlastServer).Search(ctx, req.(*SearchRequest)) + return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) } return interceptor(ctx, in, info, handler) } -func _Blast_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).GetIndexConfig(ctx, in) + return srv.(IndexServer).GetIndexConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/GetIndexConfig", + FullMethod: "/index.Index/GetIndexConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetIndexConfig(ctx, req.(*empty.Empty)) + return srv.(IndexServer).GetIndexConfig(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Blast_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(BlastServer).GetIndexStats(ctx, in) + return srv.(IndexServer).GetIndexStats(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/protobuf.Blast/GetIndexStats", + FullMethod: "/index.Index/GetIndexStats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BlastServer).GetIndexStats(ctx, req.(*empty.Empty)) + return srv.(IndexServer).GetIndexStats(ctx, 
req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -var _Blast_serviceDesc = grpc.ServiceDesc{ - ServiceName: "protobuf.Blast", - HandlerType: (*BlastServer)(nil), +func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Snapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Snapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Index_serviceDesc = grpc.ServiceDesc{ + ServiceName: "index.Index", + HandlerType: (*IndexServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "LivenessProbe", - Handler: _Blast_LivenessProbe_Handler, + Handler: _Index_LivenessProbe_Handler, }, { MethodName: "ReadinessProbe", - Handler: _Blast_ReadinessProbe_Handler, + Handler: _Index_ReadinessProbe_Handler, }, { MethodName: "GetNode", - Handler: _Blast_GetNode_Handler, + Handler: _Index_GetNode_Handler, }, { MethodName: "SetNode", - Handler: _Blast_SetNode_Handler, + Handler: _Index_SetNode_Handler, }, { MethodName: "DeleteNode", - Handler: _Blast_DeleteNode_Handler, + Handler: _Index_DeleteNode_Handler, }, { MethodName: "GetCluster", - Handler: _Blast_GetCluster_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Blast_Snapshot_Handler, - }, - { - MethodName: "GetValue", - Handler: _Blast_GetValue_Handler, - }, - { - MethodName: "SetValue", - Handler: _Blast_SetValue_Handler, - }, - { - MethodName: "DeleteValue", - Handler: _Blast_DeleteValue_Handler, + Handler: _Index_GetCluster_Handler, }, { MethodName: "GetDocument", - Handler: _Blast_GetDocument_Handler, + Handler: _Index_GetDocument_Handler, }, { MethodName: "Search", - Handler: 
_Blast_Search_Handler, + Handler: _Index_Search_Handler, }, { MethodName: "GetIndexConfig", - Handler: _Blast_GetIndexConfig_Handler, + Handler: _Index_GetIndexConfig_Handler, }, { MethodName: "GetIndexStats", - Handler: _Blast_GetIndexStats_Handler, + Handler: _Index_GetIndexStats_Handler, + }, + { + MethodName: "Snapshot", + Handler: _Index_Snapshot_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "WatchCluster", - Handler: _Blast_WatchCluster_Handler, - ServerStreams: true, - }, - { - StreamName: "WatchStore", - Handler: _Blast_WatchStore_Handler, + Handler: _Index_WatchCluster_Handler, ServerStreams: true, }, { StreamName: "IndexDocument", - Handler: _Blast_IndexDocument_Handler, + Handler: _Index_IndexDocument_Handler, ClientStreams: true, }, { StreamName: "DeleteDocument", - Handler: _Blast_DeleteDocument_Handler, + Handler: _Index_DeleteDocument_Handler, ClientStreams: true, }, }, - Metadata: "protobuf/blast.proto", + Metadata: "protobuf/index/index.proto", } diff --git a/protobuf/blast.proto b/protobuf/index/index.proto similarity index 79% rename from protobuf/blast.proto rename to protobuf/index/index.proto index 1980afa..4629c22 100644 --- a/protobuf/blast.proto +++ b/protobuf/index/index.proto @@ -17,11 +17,11 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; -package protobuf; +package index; -option go_package = "github.com/mosuka/blast/protobuf"; +option go_package = "github.com/mosuka/blast/protobuf/index"; -service Blast { +service Index { rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} @@ -30,12 +30,6 @@ service Blast { rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} - rpc Snapshot (google.protobuf.Empty) returns 
(google.protobuf.Empty) {} - - rpc GetValue (GetValueRequest) returns (GetValueResponse) {} - rpc SetValue (SetValueRequest) returns (google.protobuf.Empty) {} - rpc DeleteValue (DeleteValueRequest) returns (google.protobuf.Empty) {} - rpc WatchStore (WatchStoreRequest) returns (stream WatchStoreResponse) {} rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} @@ -43,6 +37,7 @@ service Blast { rpc Search (SearchRequest) returns (SearchResponse) {} rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} } // use for health check @@ -92,38 +87,6 @@ message GetClusterResponse { google.protobuf.Any cluster = 1; } -message GetValueRequest { - string key = 1; -} - -message GetValueResponse { - google.protobuf.Any value = 1; -} - -message SetValueRequest { - string key = 1; - google.protobuf.Any value = 2; -} - -message DeleteValueRequest { - string key = 1; -} - -message WatchStoreRequest { - string key = 1; -} - -message WatchStoreResponse { - enum Command { - UNKNOWN = 0; - SET = 1; - DELETE = 2; - } - Command command = 1; - string key = 2; - google.protobuf.Any value = 3; -} - message GetDocumentRequest { string id = 1; } diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go new file mode 100644 index 0000000..34000af --- /dev/null +++ b/protobuf/management/management.pb.go @@ -0,0 +1,1223 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: protobuf/management/management.proto + +package management + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LivenessProbeResponse_State int32 + +const ( + LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 + LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 + LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 +) + +var LivenessProbeResponse_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ALIVE", + 2: "DEAD", +} + +var LivenessProbeResponse_State_value = map[string]int32{ + "UNKNOWN": 0, + "ALIVE": 1, + "DEAD": 2, +} + +func (x LivenessProbeResponse_State) String() string { + return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +} + +func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{0, 0} +} + +type ReadinessProbeResponse_State int32 + +const ( + ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 + ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 + ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 +) + +var ReadinessProbeResponse_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READY", + 2: "NOT_READY", +} + +var ReadinessProbeResponse_State_value = map[string]int32{ + "UNKNOWN": 0, + "READY": 1, + "NOT_READY": 2, +} + +func (x 
ReadinessProbeResponse_State) String() string { + return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +} + +func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{1, 0} +} + +type WatchStoreResponse_Command int32 + +const ( + WatchStoreResponse_UNKNOWN WatchStoreResponse_Command = 0 + WatchStoreResponse_SET WatchStoreResponse_Command = 1 + WatchStoreResponse_DELETE WatchStoreResponse_Command = 2 +) + +var WatchStoreResponse_Command_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET", + 2: "DELETE", +} + +var WatchStoreResponse_Command_value = map[string]int32{ + "UNKNOWN": 0, + "SET": 1, + "DELETE": 2, +} + +func (x WatchStoreResponse_Command) String() string { + return proto.EnumName(WatchStoreResponse_Command_name, int32(x)) +} + +func (WatchStoreResponse_Command) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{12, 0} +} + +// use for health check +type LivenessProbeResponse struct { + State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.LivenessProbeResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } +func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } +func (*LivenessProbeResponse) ProtoMessage() {} +func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{0} +} + +func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +} +func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +} +func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +} +func (m *LivenessProbeResponse) XXX_Size() int { + return xxx_messageInfo_LivenessProbeResponse.Size(m) +} +func (m *LivenessProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo + +func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { + if m != nil { + return m.State + } + return LivenessProbeResponse_UNKNOWN +} + +// use for health check +type ReadinessProbeResponse struct { + State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.ReadinessProbeResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } +func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ReadinessProbeResponse) ProtoMessage() {} +func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{1} +} + +func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +} +func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +} +func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +} +func (m *ReadinessProbeResponse) XXX_Size() int { + return xxx_messageInfo_ReadinessProbeResponse.Size(m) +} +func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo + +func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { + if m != nil { + 
return m.State + } + return ReadinessProbeResponse_UNKNOWN +} + +// use for raft cluster status +type GetNodeRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } +func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } +func (*GetNodeRequest) ProtoMessage() {} +func (*GetNodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{2} +} + +func (m *GetNodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNodeRequest.Unmarshal(m, b) +} +func (m *GetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNodeRequest.Marshal(b, m, deterministic) +} +func (m *GetNodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNodeRequest.Merge(m, src) +} +func (m *GetNodeRequest) XXX_Size() int { + return xxx_messageInfo_GetNodeRequest.Size(m) +} +func (m *GetNodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNodeRequest proto.InternalMessageInfo + +func (m *GetNodeRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +// use for raft cluster status +type GetNodeResponse struct { + NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } +func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } +func (*GetNodeResponse) ProtoMessage() {} +func (*GetNodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, 
[]int{3} +} + +func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNodeResponse.Unmarshal(m, b) +} +func (m *GetNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNodeResponse.Marshal(b, m, deterministic) +} +func (m *GetNodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNodeResponse.Merge(m, src) +} +func (m *GetNodeResponse) XXX_Size() int { + return xxx_messageInfo_GetNodeResponse.Size(m) +} +func (m *GetNodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNodeResponse proto.InternalMessageInfo + +func (m *GetNodeResponse) GetNodeConfig() *any.Any { + if m != nil { + return m.NodeConfig + } + return nil +} + +func (m *GetNodeResponse) GetState() string { + if m != nil { + return m.State + } + return "" +} + +// use for raft cluster status +type SetNodeRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + NodeConfig *any.Any `protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } +func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodeRequest) ProtoMessage() {} +func (*SetNodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{4} +} + +func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNodeRequest.Unmarshal(m, b) +} +func (m *SetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNodeRequest.Marshal(b, m, deterministic) +} +func (m *SetNodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNodeRequest.Merge(m, src) +} +func (m *SetNodeRequest) XXX_Size() int { + return 
xxx_messageInfo_SetNodeRequest.Size(m) +} +func (m *SetNodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNodeRequest proto.InternalMessageInfo + +func (m *SetNodeRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SetNodeRequest) GetNodeConfig() *any.Any { + if m != nil { + return m.NodeConfig + } + return nil +} + +// use for raft cluster status +type DeleteNodeRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } +func (m *DeleteNodeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNodeRequest) ProtoMessage() {} +func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{5} +} + +func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNodeRequest.Unmarshal(m, b) +} +func (m *DeleteNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNodeRequest.Marshal(b, m, deterministic) +} +func (m *DeleteNodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNodeRequest.Merge(m, src) +} +func (m *DeleteNodeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNodeRequest.Size(m) +} +func (m *DeleteNodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNodeRequest proto.InternalMessageInfo + +func (m *DeleteNodeRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +// use for raft cluster status +type GetClusterResponse struct { + Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } +func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterResponse) ProtoMessage() {} +func (*GetClusterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{6} +} + +func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterResponse.Unmarshal(m, b) +} +func (m *GetClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterResponse.Marshal(b, m, deterministic) +} +func (m *GetClusterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterResponse.Merge(m, src) +} +func (m *GetClusterResponse) XXX_Size() int { + return xxx_messageInfo_GetClusterResponse.Size(m) +} +func (m *GetClusterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterResponse proto.InternalMessageInfo + +func (m *GetClusterResponse) GetCluster() *any.Any { + if m != nil { + return m.Cluster + } + return nil +} + +type GetValueRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetValueRequest) Reset() { *m = GetValueRequest{} } +func (m *GetValueRequest) String() string { return proto.CompactTextString(m) } +func (*GetValueRequest) ProtoMessage() {} +func (*GetValueRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{7} +} + +func (m *GetValueRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetValueRequest.Unmarshal(m, b) +} +func (m *GetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetValueRequest.Marshal(b, m, deterministic) +} +func (m *GetValueRequest) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_GetValueRequest.Merge(m, src) +} +func (m *GetValueRequest) XXX_Size() int { + return xxx_messageInfo_GetValueRequest.Size(m) +} +func (m *GetValueRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetValueRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetValueRequest proto.InternalMessageInfo + +func (m *GetValueRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type GetValueResponse struct { + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetValueResponse) Reset() { *m = GetValueResponse{} } +func (m *GetValueResponse) String() string { return proto.CompactTextString(m) } +func (*GetValueResponse) ProtoMessage() {} +func (*GetValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{8} +} + +func (m *GetValueResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetValueResponse.Unmarshal(m, b) +} +func (m *GetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetValueResponse.Marshal(b, m, deterministic) +} +func (m *GetValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetValueResponse.Merge(m, src) +} +func (m *GetValueResponse) XXX_Size() int { + return xxx_messageInfo_GetValueResponse.Size(m) +} +func (m *GetValueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetValueResponse proto.InternalMessageInfo + +func (m *GetValueResponse) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + +type SetValueRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetValueRequest) Reset() { *m = SetValueRequest{} } +func (m *SetValueRequest) String() string { return proto.CompactTextString(m) } +func (*SetValueRequest) ProtoMessage() {} +func (*SetValueRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{9} +} + +func (m *SetValueRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetValueRequest.Unmarshal(m, b) +} +func (m *SetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetValueRequest.Marshal(b, m, deterministic) +} +func (m *SetValueRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetValueRequest.Merge(m, src) +} +func (m *SetValueRequest) XXX_Size() int { + return xxx_messageInfo_SetValueRequest.Size(m) +} +func (m *SetValueRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetValueRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetValueRequest proto.InternalMessageInfo + +func (m *SetValueRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *SetValueRequest) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + +type DeleteValueRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteValueRequest) Reset() { *m = DeleteValueRequest{} } +func (m *DeleteValueRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteValueRequest) ProtoMessage() {} +func (*DeleteValueRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{10} +} + +func (m *DeleteValueRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteValueRequest.Unmarshal(m, b) +} +func (m *DeleteValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_DeleteValueRequest.Marshal(b, m, deterministic) +} +func (m *DeleteValueRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteValueRequest.Merge(m, src) +} +func (m *DeleteValueRequest) XXX_Size() int { + return xxx_messageInfo_DeleteValueRequest.Size(m) +} +func (m *DeleteValueRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteValueRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteValueRequest proto.InternalMessageInfo + +func (m *DeleteValueRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type WatchStoreRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchStoreRequest) Reset() { *m = WatchStoreRequest{} } +func (m *WatchStoreRequest) String() string { return proto.CompactTextString(m) } +func (*WatchStoreRequest) ProtoMessage() {} +func (*WatchStoreRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{11} +} + +func (m *WatchStoreRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchStoreRequest.Unmarshal(m, b) +} +func (m *WatchStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchStoreRequest.Marshal(b, m, deterministic) +} +func (m *WatchStoreRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchStoreRequest.Merge(m, src) +} +func (m *WatchStoreRequest) XXX_Size() int { + return xxx_messageInfo_WatchStoreRequest.Size(m) +} +func (m *WatchStoreRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchStoreRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchStoreRequest proto.InternalMessageInfo + +func (m *WatchStoreRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type WatchStoreResponse struct { + Command WatchStoreResponse_Command 
`protobuf:"varint,1,opt,name=command,proto3,enum=management.WatchStoreResponse_Command" json:"command,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchStoreResponse) Reset() { *m = WatchStoreResponse{} } +func (m *WatchStoreResponse) String() string { return proto.CompactTextString(m) } +func (*WatchStoreResponse) ProtoMessage() {} +func (*WatchStoreResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{12} +} + +func (m *WatchStoreResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchStoreResponse.Unmarshal(m, b) +} +func (m *WatchStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchStoreResponse.Marshal(b, m, deterministic) +} +func (m *WatchStoreResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchStoreResponse.Merge(m, src) +} +func (m *WatchStoreResponse) XXX_Size() int { + return xxx_messageInfo_WatchStoreResponse.Size(m) +} +func (m *WatchStoreResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WatchStoreResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchStoreResponse proto.InternalMessageInfo + +func (m *WatchStoreResponse) GetCommand() WatchStoreResponse_Command { + if m != nil { + return m.Command + } + return WatchStoreResponse_UNKNOWN +} + +func (m *WatchStoreResponse) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *WatchStoreResponse) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterEnum("management.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) + proto.RegisterEnum("management.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, 
ReadinessProbeResponse_State_value) + proto.RegisterEnum("management.WatchStoreResponse_Command", WatchStoreResponse_Command_name, WatchStoreResponse_Command_value) + proto.RegisterType((*LivenessProbeResponse)(nil), "management.LivenessProbeResponse") + proto.RegisterType((*ReadinessProbeResponse)(nil), "management.ReadinessProbeResponse") + proto.RegisterType((*GetNodeRequest)(nil), "management.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "management.GetNodeResponse") + proto.RegisterType((*SetNodeRequest)(nil), "management.SetNodeRequest") + proto.RegisterType((*DeleteNodeRequest)(nil), "management.DeleteNodeRequest") + proto.RegisterType((*GetClusterResponse)(nil), "management.GetClusterResponse") + proto.RegisterType((*GetValueRequest)(nil), "management.GetValueRequest") + proto.RegisterType((*GetValueResponse)(nil), "management.GetValueResponse") + proto.RegisterType((*SetValueRequest)(nil), "management.SetValueRequest") + proto.RegisterType((*DeleteValueRequest)(nil), "management.DeleteValueRequest") + proto.RegisterType((*WatchStoreRequest)(nil), "management.WatchStoreRequest") + proto.RegisterType((*WatchStoreResponse)(nil), "management.WatchStoreResponse") +} + +func init() { + proto.RegisterFile("protobuf/management/management.proto", fileDescriptor_5e030ad796566078) +} + +var fileDescriptor_5e030ad796566078 = []byte{ + // 674 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x4f, 0xd3, 0x50, + 0x14, 0x5d, 0x8b, 0xa3, 0xe3, 0x22, 0xa3, 0xbc, 0x20, 0xd1, 0xa2, 0x04, 0x1f, 0x8a, 0x28, 0xb1, + 0x33, 0xe8, 0x47, 0x45, 0x27, 0xad, 0x8b, 0x3a, 0x37, 0xd3, 0x22, 0x44, 0x13, 0x63, 0xba, 0xed, + 0x31, 0x16, 0xd6, 0xbe, 0xb9, 0xbe, 0x91, 0xf0, 0xd1, 0x6f, 0xfe, 0x5b, 0xfe, 0x47, 0xfe, 0x09, + 0x66, 0xfd, 0xfd, 0xbb, 0x24, 0x7e, 0x6b, 0xdf, 0xbd, 0xf7, 0x9c, 0x73, 0xcf, 0xde, 0x69, 0x06, + 0x0f, 0x26, 0x53, 0xca, 0x68, 0x6f, 0x76, 0xd6, 0x30, 0x0d, 0xcb, 0x18, 0x12, 0x93, 0x58, 
0x2c, + 0xf2, 0x28, 0x3b, 0x65, 0x04, 0xe1, 0x89, 0x74, 0x67, 0x48, 0xe9, 0x70, 0x4c, 0x1a, 0xc1, 0xa0, + 0x61, 0x5d, 0xb9, 0x6d, 0xd2, 0x66, 0xb2, 0x44, 0xcc, 0x09, 0xf3, 0x8a, 0xf8, 0x17, 0x07, 0xb7, + 0xda, 0xa3, 0x4b, 0x62, 0x11, 0xdb, 0xfe, 0x3c, 0xa5, 0x3d, 0xa2, 0x11, 0x7b, 0x42, 0x2d, 0x9b, + 0xa0, 0x57, 0x50, 0xb5, 0x99, 0xc1, 0xc8, 0x6d, 0x6e, 0x9b, 0xdb, 0xab, 0x1f, 0x3c, 0x92, 0x23, + 0xfc, 0x99, 0x13, 0xb2, 0x3e, 0x6f, 0xd7, 0xdc, 0x29, 0xfc, 0x18, 0xaa, 0xce, 0x3b, 0x5a, 0x06, + 0xe1, 0x4b, 0xe7, 0x63, 0xa7, 0x7b, 0xda, 0x11, 0x2b, 0x68, 0x09, 0xaa, 0xcd, 0xf6, 0xfb, 0x13, + 0x55, 0xe4, 0x50, 0x0d, 0x6e, 0x28, 0x6a, 0x53, 0x11, 0x79, 0xfc, 0x9b, 0x83, 0x0d, 0x8d, 0x18, + 0x83, 0x51, 0x5a, 0xc4, 0x61, 0x5c, 0xc4, 0x5e, 0x54, 0x44, 0xf6, 0x48, 0x5c, 0x85, 0x9c, 0xa7, + 0x42, 0x53, 0x9b, 0xca, 0x57, 0x91, 0x43, 0x2b, 0xb0, 0xd4, 0xe9, 0x1e, 0xff, 0x70, 0x5f, 0x79, + 0xbc, 0x0d, 0xf5, 0x16, 0x61, 0x1d, 0x3a, 0x20, 0x1a, 0xf9, 0x39, 0x23, 0x36, 0x43, 0x75, 0xe0, + 0x47, 0x03, 0x87, 0x7e, 0x49, 0xe3, 0x47, 0x03, 0xfc, 0x1d, 0x56, 0x83, 0x0e, 0x4f, 0xe4, 0x0b, + 0x00, 0x8b, 0x0e, 0xc8, 0x11, 0xb5, 0xce, 0x46, 0x43, 0xa7, 0x75, 0xf9, 0x60, 0x5d, 0x76, 0x5d, + 0x97, 0x7d, 0xd7, 0xe5, 0xa6, 0x75, 0xa5, 0x45, 0xfa, 0xd0, 0xba, 0xbf, 0x1a, 0xef, 0x60, 0x7b, + 0x82, 0x4f, 0xa0, 0xae, 0x17, 0x0a, 0x48, 0xb0, 0xf1, 0xd7, 0x63, 0xc3, 0x3b, 0xb0, 0xa6, 0x90, + 0x31, 0x61, 0xa4, 0x68, 0x37, 0x05, 0x50, 0x8b, 0xb0, 0xa3, 0xf1, 0xcc, 0x66, 0x64, 0x1a, 0xac, + 0x27, 0x83, 0xd0, 0x77, 0x8f, 0x0a, 0x77, 0xf3, 0x9b, 0xf0, 0x8e, 0xe3, 0xd0, 0x89, 0x31, 0x9e, + 0x05, 0x44, 0x22, 0x2c, 0x5c, 0x90, 0x2b, 0x8f, 0x69, 0xfe, 0x88, 0x0f, 0x41, 0x0c, 0x9b, 0x3c, + 0xa2, 0x27, 0x50, 0xbd, 0x9c, 0x1f, 0x14, 0xd2, 0xb8, 0x2d, 0xb8, 0x0b, 0xab, 0x7a, 0x19, 0x49, + 0x08, 0xc8, 0x97, 0x03, 0xee, 0x02, 0x72, 0x0d, 0x2a, 0x11, 0xfe, 0x10, 0xd6, 0x4e, 0x0d, 0xd6, + 0x3f, 0xd7, 0x19, 0x9d, 0x16, 0xb4, 0xfd, 0xe1, 0x00, 0x45, 0xfb, 0xbc, 0x15, 0xdf, 0x80, 0xd0, + 0xa7, 0xa6, 0x69, 0x58, 0x03, 
0xef, 0x46, 0xef, 0x46, 0x6f, 0x74, 0x7a, 0x40, 0x3e, 0x72, 0xbb, + 0x35, 0x7f, 0xcc, 0xa7, 0xe2, 0x33, 0xb6, 0x5c, 0x28, 0xdf, 0x72, 0x1f, 0x04, 0x0f, 0x31, 0x9e, + 0x08, 0x01, 0x16, 0x74, 0xf5, 0x58, 0xe4, 0x10, 0xc0, 0xa2, 0xa2, 0xb6, 0xd5, 0x63, 0x55, 0xe4, + 0x0f, 0xfe, 0x2e, 0x02, 0x7c, 0x0a, 0xd4, 0xa1, 0x36, 0xac, 0xc4, 0x72, 0x8f, 0x36, 0x52, 0x4c, + 0xea, 0xfc, 0xcb, 0x22, 0xdd, 0x2f, 0xfd, 0x54, 0xe0, 0x0a, 0xea, 0x40, 0x3d, 0x1e, 0xe0, 0x5c, + 0x38, 0x5c, 0x1e, 0x7a, 0x5c, 0x41, 0x0a, 0x08, 0x5e, 0x2e, 0x91, 0x14, 0x1d, 0x88, 0xc7, 0x59, + 0xda, 0xcc, 0xac, 0x05, 0x28, 0xaf, 0x41, 0xd0, 0xb3, 0x50, 0xe2, 0x99, 0x94, 0x72, 0xa4, 0xe2, + 0x0a, 0x52, 0x01, 0xc2, 0x9c, 0xa1, 0x7b, 0x51, 0x8c, 0x54, 0xfe, 0x0a, 0x60, 0xde, 0x01, 0x84, + 0x49, 0xcc, 0x75, 0x66, 0x2b, 0xb1, 0x4c, 0x22, 0xb9, 0xb8, 0x82, 0x3e, 0xc0, 0x4d, 0xe7, 0x52, + 0xfd, 0x37, 0xd2, 0x33, 0x0e, 0xb5, 0xa0, 0xe6, 0x47, 0x16, 0x25, 0x6d, 0x8c, 0x86, 0x46, 0xba, + 0x9b, 0x5d, 0x0c, 0x44, 0x35, 0xa1, 0xa6, 0x67, 0x02, 0x25, 0x12, 0x5d, 0xe0, 0x4f, 0x0b, 0x96, + 0x23, 0x69, 0x45, 0x5b, 0x69, 0x9f, 0xaf, 0x09, 0xd4, 0x05, 0x08, 0x53, 0x17, 0xff, 0xbd, 0x52, + 0x31, 0x8f, 0xbb, 0x94, 0x0e, 0xab, 0xe3, 0xd2, 0x4b, 0xa8, 0xe9, 0x96, 0x31, 0xb1, 0xcf, 0x29, + 0xcb, 0x75, 0x3b, 0x57, 0xce, 0xdb, 0xa7, 0xdf, 0xf6, 0x87, 0x23, 0x76, 0x3e, 0xeb, 0xc9, 0x7d, + 0x6a, 0x36, 0x4c, 0x6a, 0xcf, 0x2e, 0x8c, 0x46, 0x6f, 0x6c, 0xd8, 0xac, 0x91, 0xf1, 0x97, 0xa0, + 0xb7, 0xe8, 0x1c, 0x3e, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x12, 0x52, 0x02, 0x9b, 0x30, 0x08, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ManagementClient is the client API for Management service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ManagementClient interface { + LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) + ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) + GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) + SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) + WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_WatchClusterClient, error) + GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) + SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) + WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Management_WatchStoreClient, error) + Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type managementClient struct { + cc *grpc.ClientConn +} + +func NewManagementClient(cc *grpc.ClientConn) ManagementClient { + return &managementClient{cc} +} + +func (c *managementClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { + out := new(LivenessProbeResponse) + err := c.cc.Invoke(ctx, "/management.Management/LivenessProbe", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { + out := new(ReadinessProbeResponse) + err := c.cc.Invoke(ctx, "/management.Management/ReadinessProbe", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { + out := new(GetNodeResponse) + err := c.cc.Invoke(ctx, "/management.Management/GetNode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/management.Management/SetNode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/management.Management/DeleteNode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { + out := new(GetClusterResponse) + err := c.cc.Invoke(ctx, "/management.Management/GetCluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_WatchClusterClient, error) { + stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[0], "/management.Management/WatchCluster", opts...) 
+ if err != nil { + return nil, err + } + x := &managementWatchClusterClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Management_WatchClusterClient interface { + Recv() (*GetClusterResponse, error) + grpc.ClientStream +} + +type managementWatchClusterClient struct { + grpc.ClientStream +} + +func (x *managementWatchClusterClient) Recv() (*GetClusterResponse, error) { + m := new(GetClusterResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *managementClient) GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) { + out := new(GetValueResponse) + err := c.cc.Invoke(ctx, "/management.Management/GetValue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/management.Management/SetValue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/management.Management/DeleteValue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *managementClient) WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Management_WatchStoreClient, error) { + stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[1], "/management.Management/WatchStore", opts...) 
+ if err != nil { + return nil, err + } + x := &managementWatchStoreClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Management_WatchStoreClient interface { + Recv() (*WatchStoreResponse, error) + grpc.ClientStream +} + +type managementWatchStoreClient struct { + grpc.ClientStream +} + +func (x *managementWatchStoreClient) Recv() (*WatchStoreResponse, error) { + m := new(WatchStoreResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/management.Management/Snapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ManagementServer is the server API for Management service. +type ManagementServer interface { + LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) + ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) + GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) + SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) + DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) + GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) + WatchCluster(*empty.Empty, Management_WatchClusterServer) error + GetValue(context.Context, *GetValueRequest) (*GetValueResponse, error) + SetValue(context.Context, *SetValueRequest) (*empty.Empty, error) + DeleteValue(context.Context, *DeleteValueRequest) (*empty.Empty, error) + WatchStore(*WatchStoreRequest, Management_WatchStoreServer) error + Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) +} + +func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { + s.RegisterService(&_Management_serviceDesc, 
srv) +} + +func _Management_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).LivenessProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/LivenessProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).LivenessProbe(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).ReadinessProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/ReadinessProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).ReadinessProbe(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/GetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_SetNode_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).SetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/SetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).SetNode(ctx, req.(*SetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).DeleteNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/DeleteNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).GetCluster(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := 
stream.RecvMsg(m); err != nil { + return err + } + return srv.(ManagementServer).WatchCluster(m, &managementWatchClusterServer{stream}) +} + +type Management_WatchClusterServer interface { + Send(*GetClusterResponse) error + grpc.ServerStream +} + +type managementWatchClusterServer struct { + grpc.ServerStream +} + +func (x *managementWatchClusterServer) Send(m *GetClusterResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Management_GetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValueRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).GetValue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/GetValue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).GetValue(ctx, req.(*GetValueRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_SetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetValueRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).SetValue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/SetValue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).SetValue(ctx, req.(*SetValueRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_DeleteValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteValueRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ManagementServer).DeleteValue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/DeleteValue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).DeleteValue(ctx, req.(*DeleteValueRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Management_WatchStore_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchStoreRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ManagementServer).WatchStore(m, &managementWatchStoreServer{stream}) +} + +type Management_WatchStoreServer interface { + Send(*WatchStoreResponse) error + grpc.ServerStream +} + +type managementWatchStoreServer struct { + grpc.ServerStream +} + +func (x *managementWatchStoreServer) Send(m *WatchStoreResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Management_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ManagementServer).Snapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/management.Management/Snapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ManagementServer).Snapshot(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Management_serviceDesc = grpc.ServiceDesc{ + ServiceName: "management.Management", + HandlerType: (*ManagementServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LivenessProbe", + Handler: _Management_LivenessProbe_Handler, + }, + { + MethodName: "ReadinessProbe", + Handler: _Management_ReadinessProbe_Handler, + }, + { + MethodName: "GetNode", + Handler: _Management_GetNode_Handler, + }, + { + MethodName: "SetNode", + Handler: 
_Management_SetNode_Handler, + }, + { + MethodName: "DeleteNode", + Handler: _Management_DeleteNode_Handler, + }, + { + MethodName: "GetCluster", + Handler: _Management_GetCluster_Handler, + }, + { + MethodName: "GetValue", + Handler: _Management_GetValue_Handler, + }, + { + MethodName: "SetValue", + Handler: _Management_SetValue_Handler, + }, + { + MethodName: "DeleteValue", + Handler: _Management_DeleteValue_Handler, + }, + { + MethodName: "Snapshot", + Handler: _Management_Snapshot_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "WatchCluster", + Handler: _Management_WatchCluster_Handler, + ServerStreams: true, + }, + { + StreamName: "WatchStore", + Handler: _Management_WatchStore_Handler, + ServerStreams: true, + }, + }, + Metadata: "protobuf/management/management.proto", +} diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto new file mode 100644 index 0000000..dfeb9d2 --- /dev/null +++ b/protobuf/management/management.proto @@ -0,0 +1,118 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; + +package management; + +option go_package = "github.com/mosuka/blast/protobuf/management"; + +service Management { + rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} + rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + + rpc GetNode (GetNodeRequest) returns (GetNodeResponse) {} + rpc SetNode (SetNodeRequest) returns (google.protobuf.Empty) {} + rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} + rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} + rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} + + rpc GetValue (GetValueRequest) returns (GetValueResponse) {} + rpc SetValue (SetValueRequest) returns (google.protobuf.Empty) {} + rpc DeleteValue (DeleteValueRequest) returns (google.protobuf.Empty) {} + rpc WatchStore (WatchStoreRequest) returns (stream WatchStoreResponse) {} + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} +} + +// use for health check +message LivenessProbeResponse { + enum State { + UNKNOWN = 0; + ALIVE = 1; + DEAD = 2; + } + State state = 1; +} + +// use for health check +message ReadinessProbeResponse { + enum State { + UNKNOWN = 0; + READY = 1; + NOT_READY = 2; + } + State state = 1; +} + +// use for raft cluster status +message GetNodeRequest { + string id = 1; +} + +// use for raft cluster status +message GetNodeResponse { + google.protobuf.Any nodeConfig = 1; + string state = 2; +} + +// use for raft cluster status +message SetNodeRequest { + string id = 1; + google.protobuf.Any nodeConfig = 2; +} + +// use for raft cluster status +message DeleteNodeRequest { + string id = 1; +} + +// use for raft cluster status +message GetClusterResponse { + google.protobuf.Any cluster = 1; +} + +message GetValueRequest { + string key = 1; +} + +message GetValueResponse { + google.protobuf.Any value = 1; 
+} + +message SetValueRequest { + string key = 1; + google.protobuf.Any value = 2; +} + +message DeleteValueRequest { + string key = 1; +} + +message WatchStoreRequest { + string key = 1; +} + +message WatchStoreResponse { + enum Command { + UNKNOWN = 0; + SET = 1; + DELETE = 2; + } + Command command = 1; + string key = 2; + google.protobuf.Any value = 3; +} diff --git a/protobuf/util_test.go b/protobuf/util_test.go index f8fb7e4..b94d717 100644 --- a/protobuf/util_test.go +++ b/protobuf/util_test.go @@ -29,19 +29,19 @@ func TestMarshalAny_Slice(t *testing.T) { dataAny := &any.Any{} err := UnmarshalAny(data, dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedType := "[]interface {}" actualType := dataAny.TypeUrl if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) } expectedValue := []byte(`["a",1]`) actualValue := dataAny.Value if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -51,19 +51,19 @@ func TestMarshalAny_Map(t *testing.T) { dataAny := &any.Any{} err := UnmarshalAny(data, dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedMapType := "map[string]interface {}" actualMapType := dataAny.TypeUrl if expectedMapType != actualMapType { - t.Errorf("expected content to see %s, saw %s", expectedMapType, actualMapType) + t.Fatalf("expected content to see %s, saw %s", expectedMapType, actualMapType) } expectedValue := []byte(`{"a":1,"b":2,"c":3}`) actualValue := dataAny.Value if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -72,7 +72,7 @@ func TestMarshalAny_Map(t *testing.T) { 
// fieldsAny := &any.Any{} // err := UnmarshalAny(fieldsMap, fieldsAny) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // data := &index.Document{ @@ -83,19 +83,19 @@ func TestMarshalAny_Map(t *testing.T) { // dataAny := &any.Any{} // err = UnmarshalAny(data, dataAny) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // expectedType := "index.Document" // actualType := dataAny.TypeUrl // if expectedType != actualType { -// t.Errorf("expected content to see %s, saw %s", expectedType, actualType) +// t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) // } // // expectedValue := []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`) // actualValue := dataAny.Value // if !bytes.Equal(expectedValue, actualValue) { -// t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) +// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) // } //} @@ -114,19 +114,19 @@ func TestMarshalAny_Map(t *testing.T) { // dataAny := &any.Any{} // err := UnmarshalAny(data, dataAny) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // expectedType := "raft.Node" // actualType := dataAny.TypeUrl // if expectedType != actualType { -// t.Errorf("expected content to see %s, saw %s", expectedType, actualType) +// t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) // } // // expectedValue := []byte(`{"id":"node1","metadata":{"bind_addr":":6060","grpc_addr":":5050","http_addr":":8080","data_dir":"/tmp/blast/index1","leader":true}}`) // actualValue := dataAny.Value // if !bytes.Equal(expectedValue, actualValue) { -// t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) +// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) // } //} @@ -136,19 +136,19 @@ func TestMarshalAny_SearchRequest(t *testing.T) { dataAny := &any.Any{} err 
:= UnmarshalAny(data, dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedType := "bleve.SearchRequest" actualType := dataAny.TypeUrl if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) } expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`) actualValue := dataAny.Value if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -160,19 +160,19 @@ func TestMarshalAny_SearchResult(t *testing.T) { dataAny := &any.Any{} err := UnmarshalAny(data, dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } expectedType := "bleve.SearchResult" actualType := dataAny.TypeUrl if expectedType != actualType { - t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) } expectedValue := []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`) actualValue := dataAny.Value if !bytes.Equal(expectedValue, actualValue) { - t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -184,7 +184,7 @@ func TestUnmarshalAny_Slice(t *testing.T) { ins, err := MarshalAny(dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } data := *ins.(*[]interface{}) @@ -192,13 +192,13 @@ func TestUnmarshalAny_Slice(t *testing.T) { expected1 := "a" actual1 := data[0] if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) + t.Fatalf("expected content to see %v, saw %v", expected1, 
actual1) } expected2 := float64(1) actual2 := data[1] if expected2 != actual2 { - t.Errorf("expected content to see %v, saw %v", expected2, actual2) + t.Fatalf("expected content to see %v, saw %v", expected2, actual2) } } @@ -210,7 +210,7 @@ func TestUnmarshalAny_Map(t *testing.T) { ins, err := MarshalAny(dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } data := *ins.(*map[string]interface{}) @@ -218,19 +218,19 @@ func TestUnmarshalAny_Map(t *testing.T) { expected1 := float64(1) actual1 := data["a"] if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) + t.Fatalf("expected content to see %v, saw %v", expected1, actual1) } expected2 := float64(2) actual2 := data["b"] if expected2 != actual2 { - t.Errorf("expected content to see %v, saw %v", expected2, actual2) + t.Fatalf("expected content to see %v, saw %v", expected2, actual2) } expected3 := float64(3) actual3 := data["c"] if expected3 != actual3 { - t.Errorf("expected content to see %v, saw %v", expected3, actual3) + t.Fatalf("expected content to see %v, saw %v", expected3, actual3) } } @@ -242,7 +242,7 @@ func TestUnmarshalAny_Map(t *testing.T) { // // ins, err := MarshalAny(dataAny) // if err != nil { -// t.Errorf("%v", err) +// t.Fatalf("%v", err) // } // // data := *ins.(*index.Document) @@ -250,19 +250,19 @@ func TestUnmarshalAny_Map(t *testing.T) { // expected1 := "1" // actual1 := data.Id // if expected1 != actual1 { -// t.Errorf("expected content to see %v, saw %v", expected1, actual1) +// t.Fatalf("expected content to see %v, saw %v", expected1, actual1) // } // // expected2 := "map[string]interface {}" // actual2 := data.Fields.TypeUrl // if expected2 != actual2 { -// t.Errorf("expected content to see %v, saw %v", expected2, actual2) +// t.Fatalf("expected content to see %v, saw %v", expected2, actual2) // } // // expected3 := []byte(`{"f1":"aaa","f2":222,"f3":"ccc"}`) // actual3 := data.Fields.Value // if !bytes.Equal(expected3, actual3) { 
-// t.Errorf("expected content to see %v, saw %v", expected3, actual3) +// t.Fatalf("expected content to see %v, saw %v", expected3, actual3) // } //} @@ -274,7 +274,7 @@ func TestUnmarshalAny_SearchRequest(t *testing.T) { ins, err := MarshalAny(dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } data := *ins.(*bleve.SearchRequest) @@ -282,7 +282,7 @@ func TestUnmarshalAny_SearchRequest(t *testing.T) { expected1 := bleve.NewQueryStringQuery("blast").Query actual1 := data.Query.(*query.QueryStringQuery).Query if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) + t.Fatalf("expected content to see %v, saw %v", expected1, actual1) } } @@ -294,7 +294,7 @@ func TestUnmarshalAny_SearchResult(t *testing.T) { ins, err := MarshalAny(dataAny) if err != nil { - t.Errorf("%v", err) + t.Fatalf("%v", err) } data := *ins.(*bleve.SearchResult) @@ -302,6 +302,6 @@ func TestUnmarshalAny_SearchResult(t *testing.T) { expected1 := uint64(10) actual1 := data.Total if expected1 != actual1 { - t.Errorf("expected content to see %v, saw %v", expected1, actual1) + t.Fatalf("expected content to see %v, saw %v", expected1, actual1) } } From 6170d6d1c73df57dda9a8cd316d8805cc0684a61 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 26 Jul 2019 20:48:33 +0900 Subject: [PATCH 04/76] Update CHANGES.txt --- CHANGES.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 5eb9607..ef4da28 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,6 +12,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
### Changed +- New CLI #82 +- Split protobuf into components #84 + ## [v0.7.1] - 2019-07-18 ### Added From c03b8577550be1322c88da2018302cbfd4ced95e Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Sat, 27 Jul 2019 02:17:33 +0900 Subject: [PATCH 05/76] Change subcommands (#85) --- README.md | 74 +- ...ributor_delete.go => dispatcher_delete.go} | 2 +- .../{distributor_get.go => dispatcher_get.go} | 2 +- ...stributor_index.go => dispatcher_index.go} | 2 +- ...de_health.go => dispatcher_node_health.go} | 2 +- ...ributor_search.go => dispatcher_search.go} | 2 +- ...utor_node_start.go => dispatcher_start.go} | 4 +- ..._peers_info.go => indexer_cluster_info.go} | 2 +- ...node_leave.go => indexer_cluster_leave.go} | 4 +- ...eers_watch.go => indexer_cluster_watch.go} | 4 +- ...r_node_snapshot.go => indexer_snapshot.go} | 2 +- ...indexer_node_start.go => indexer_start.go} | 4 +- cmd/blast/main.go | 1148 ++++++++--------- ..._peers_info.go => manager_cluster_info.go} | 2 +- ...node_leave.go => manager_cluster_leave.go} | 8 +- ...eers_watch.go => manager_cluster_watch.go} | 4 +- .../{cluster_delete.go => manager_delete.go} | 2 +- cmd/blast/{cluster_get.go => manager_get.go} | 2 +- ..._node_health.go => manager_node_health.go} | 2 +- ...ster_node_info.go => manager_node_info.go} | 14 +- cmd/blast/{cluster_set.go => manager_set.go} | 2 +- ...r_node_snapshot.go => manager_snapshot.go} | 2 +- ...cluster_node_start.go => manager_start.go} | 2 +- .../{cluster_watch.go => manager_watch.go} | 2 +- ...i_bulk_index.txt => wiki_bulk_index.jsonl} | 0 25 files changed, 645 insertions(+), 649 deletions(-) rename cmd/blast/{distributor_delete.go => dispatcher_delete.go} (97%) rename cmd/blast/{distributor_get.go => dispatcher_get.go} (96%) rename cmd/blast/{distributor_index.go => dispatcher_index.go} (98%) rename cmd/blast/{distributor_node_health.go => dispatcher_node_health.go} (97%) rename cmd/blast/{distributor_search.go => dispatcher_search.go} (97%) rename 
cmd/blast/{distributor_node_start.go => dispatcher_start.go} (96%) rename cmd/blast/{indexer_peers_info.go => indexer_cluster_info.go} (96%) rename cmd/blast/{indexer_node_leave.go => indexer_cluster_leave.go} (92%) rename cmd/blast/{indexer_peers_watch.go => indexer_cluster_watch.go} (95%) rename cmd/blast/{indexer_node_snapshot.go => indexer_snapshot.go} (95%) rename cmd/blast/{indexer_node_start.go => indexer_start.go} (97%) rename cmd/blast/{cluster_peers_info.go => manager_cluster_info.go} (96%) rename cmd/blast/{cluster_node_leave.go => manager_cluster_leave.go} (81%) rename cmd/blast/{cluster_peers_watch.go => manager_cluster_watch.go} (95%) rename cmd/blast/{cluster_delete.go => manager_delete.go} (96%) rename cmd/blast/{cluster_get.go => manager_get.go} (96%) rename cmd/blast/{cluster_node_health.go => manager_node_health.go} (97%) rename cmd/blast/{cluster_node_info.go => manager_node_info.go} (83%) rename cmd/blast/{cluster_set.go => manager_set.go} (97%) rename cmd/blast/{cluster_node_snapshot.go => manager_snapshot.go} (95%) rename cmd/blast/{cluster_node_start.go => manager_start.go} (98%) rename cmd/blast/{cluster_watch.go => manager_watch.go} (97%) rename example/{wiki_bulk_index.txt => wiki_bulk_index.jsonl} (100%) diff --git a/README.md b/README.md index ba90827..9012660 100644 --- a/README.md +++ b/README.md @@ -171,7 +171,7 @@ You can see the binary file when build successful like so: ```bash $ ls ./bin -blast blastd +blast ``` @@ -245,7 +245,7 @@ $ make \ Running a Blast in standalone mode is easy. Start a indexer like so: ```bash -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --grpc-address=:5000 \ --http-address=:8000 \ --node-id=indexer1 \ @@ -531,7 +531,7 @@ You can see the result in JSON format. 
The result of the above command is: Indexing documents in bulk, run the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.txt --bulk +$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk ``` You can see the result in JSON format. The result of the above command is: @@ -566,7 +566,7 @@ Also you can do above commands via HTTP REST API that listened port 5002. Indexing a document via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents/enwiki_1' --data-binary ' +$ curl -X PUT 'http://127.0.0.1:8000/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' { "title_en": "Search engine (computing)", "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", @@ -596,7 +596,7 @@ $ curl -X GET 'http://127.0.0.1:8000/documents/enwiki_1' Searching documents via HTTP is as following: ```bash -$ curl -X POST 'http://127.0.0.1:8000/search' --data-binary @./example/wiki_search_request.json +$ curl -X POST 'http://127.0.0.1:8000/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json ``` @@ -614,7 +614,7 @@ $ curl -X DELETE 'http://127.0.0.1:8000/documents/enwiki_1' Indexing documents in bulk via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' --data-binary @./example/wiki_bulk_index.txt +$ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl ``` @@ -623,7 +623,7 @@ $ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' --data-binary @./examp Deleting documents in bulk via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:8000/documents' --data-binary @./example/wiki_bulk_delete.txt +$ curl -X DELETE 'http://127.0.0.1:8000/documents' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt ``` @@ -636,7 +636,7 @@ Blast can easily bring up a cluster. Running a Blast in standalone is not fault First of all, start a indexer in standalone. ```bash -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --grpc-address=:5000 \ --http-address=:8000 \ --node-id=indexer1 \ @@ -651,7 +651,7 @@ $ ./bin/blast indexer node start \ Then, start two more indexers. 
```bash -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5010 \ --http-address=:8010 \ @@ -660,7 +660,7 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer2 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5020 \ --http-address=:8020 \ @@ -677,7 +677,7 @@ So you have a 3-node cluster. That way you can tolerate the failure of 1 node. Y ```bash -$ ./bin/blast indexer peers info --grpc-address=:5000 +$ ./bin/blast indexer cluster info --grpc-address=:5000 ``` You can see the result in JSON format. The result of the above command is: @@ -783,7 +783,7 @@ Blast provides the following type of node for federation: Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. ```bash -$ ./bin/blast cluster node start \ +$ ./bin/blast manager start \ --grpc-address=:5100 \ --http-address=:8100 \ --node-id=cluster1 \ @@ -794,7 +794,7 @@ $ ./bin/blast cluster node start \ --index-type=upside_down \ --index-storage-type=boltdb -$ ./bin/blast cluster node start \ +$ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5110 \ --http-address=:8110 \ @@ -803,7 +803,7 @@ $ ./bin/blast cluster node start \ --data-dir=/tmp/blast/cluster2 \ --raft-storage-type=boltdb -$ ./bin/blast cluster node start \ +$ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5120 \ --http-address=:8120 \ @@ -819,8 +819,8 @@ Federated mode differs from cluster mode that it specifies the manager in start The following example starts two 3-node clusters. 
```bash -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5000 \ --http-address=:8000 \ @@ -829,8 +829,8 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer1 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5010 \ --http-address=:8010 \ @@ -839,8 +839,8 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer2 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5020 \ --http-address=:8020 \ @@ -849,8 +849,8 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer3 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5030 \ --http-address=:8030 \ @@ -859,8 +859,8 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer4 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5040 \ --http-address=:8040 \ @@ -869,8 +869,8 @@ $ ./bin/blast indexer node start \ --data-dir=/tmp/blast/indexer5 \ --raft-storage-type=boltdb -$ ./bin/blast indexer node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast indexer start \ + --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5050 \ --http-address=:8050 \ @@ -885,22 +885,28 @@ $ ./bin/blast indexer node start \ Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. 
```bash -$ ./bin/blast distributor node start \ - --cluster-grpc-address=:5100 \ +$ ./bin/blast dispatcher start \ + --manager-grpc-address=:5100 \ --grpc-address=:5200 \ --http-address=:8200 ``` ```bash -$ ./bin/blast distributor index --grpc-address=:5200 --file=./example/wiki_bulk_index.txt --bulk +$ ./bin/blast manager cluster info --grpc-address=:5100 +$ ./bin/blast indexer cluster info --grpc-address=:5000 +$ ./bin/blast indexer cluster info --grpc-address=:5040 ``` ```bash -$ ./bin/blast distributor search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json +$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk ``` ```bash -$ ./bin/blast distributor delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt +$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json +``` + +```bash +$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt ``` @@ -945,7 +951,7 @@ $ docker run --rm --name blast-indexer1 \ -p 5000:5000 \ -p 8000:8000 \ -v $(pwd)/example:/opt/blast/example \ - mosuka/blast:latest blast indexer node start \ + mosuka/blast:latest blast indexer start \ --grpc-address=:5000 \ --http-address=:8000 \ --node-id=blast-indexer1 \ @@ -995,7 +1001,7 @@ $ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles ### Starting Indexer ```bash -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --grpc-address=:5000 \ --http-address=:8000 \ --node-id=indexer1 \ @@ -1027,7 +1033,7 @@ This section explain how to index Spatial/Geospatial data to Blast. 
### Starting Indexer with Spatial/Geospatial index mapping ```bash -$ ./bin/blast indexer node start \ +$ ./bin/blast indexer start \ --grpc-address=:5000 \ --http-address=:8000 \ --node-id=indexer1 \ diff --git a/cmd/blast/distributor_delete.go b/cmd/blast/dispatcher_delete.go similarity index 97% rename from cmd/blast/distributor_delete.go rename to cmd/blast/dispatcher_delete.go index 222af64..8466634 100644 --- a/cmd/blast/distributor_delete.go +++ b/cmd/blast/dispatcher_delete.go @@ -25,7 +25,7 @@ import ( "github.com/urfave/cli" ) -func distributorDelete(c *cli.Context) error { +func dispatcherDelete(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") id := c.Args().Get(0) diff --git a/cmd/blast/distributor_get.go b/cmd/blast/dispatcher_get.go similarity index 96% rename from cmd/blast/distributor_get.go rename to cmd/blast/dispatcher_get.go index e7f0cd1..d9108fb 100644 --- a/cmd/blast/distributor_get.go +++ b/cmd/blast/dispatcher_get.go @@ -24,7 +24,7 @@ import ( "github.com/urfave/cli" ) -func distributorGet(c *cli.Context) error { +func dispatcherGet(c *cli.Context) error { grpcAddr := c.String("grpc-address") id := c.Args().Get(0) if id == "" { diff --git a/cmd/blast/distributor_index.go b/cmd/blast/dispatcher_index.go similarity index 98% rename from cmd/blast/distributor_index.go rename to cmd/blast/dispatcher_index.go index ce78c90..57f282d 100644 --- a/cmd/blast/distributor_index.go +++ b/cmd/blast/dispatcher_index.go @@ -27,7 +27,7 @@ import ( "github.com/urfave/cli" ) -func distributorIndex(c *cli.Context) error { +func dispatcherIndex(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") bulk := c.Bool("bulk") diff --git a/cmd/blast/distributor_node_health.go b/cmd/blast/dispatcher_node_health.go similarity index 97% rename from cmd/blast/distributor_node_health.go rename to cmd/blast/dispatcher_node_health.go index cf0b51e..698473e 100644 --- 
a/cmd/blast/distributor_node_health.go +++ b/cmd/blast/dispatcher_node_health.go @@ -22,7 +22,7 @@ import ( "github.com/urfave/cli" ) -func distributorNodeHealth(c *cli.Context) error { +func dispatcherNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") liveness := c.Bool("liveness") readiness := c.Bool("readiness") diff --git a/cmd/blast/distributor_search.go b/cmd/blast/dispatcher_search.go similarity index 97% rename from cmd/blast/distributor_search.go rename to cmd/blast/dispatcher_search.go index b9494cc..976e36a 100644 --- a/cmd/blast/distributor_search.go +++ b/cmd/blast/dispatcher_search.go @@ -25,7 +25,7 @@ import ( "github.com/urfave/cli" ) -func distributorSearch(c *cli.Context) error { +func dispatcherSearch(c *cli.Context) error { grpcAddr := c.String("grpc-address") searchRequestPath := c.String("file") diff --git a/cmd/blast/distributor_node_start.go b/cmd/blast/dispatcher_start.go similarity index 96% rename from cmd/blast/distributor_node_start.go rename to cmd/blast/dispatcher_start.go index bba46ee..9c9540f 100644 --- a/cmd/blast/distributor_node_start.go +++ b/cmd/blast/dispatcher_start.go @@ -25,8 +25,8 @@ import ( "github.com/urfave/cli" ) -func distributorNodeStart(c *cli.Context) error { - managerAddr := c.String("cluster-grpc-address") +func dispatcherStart(c *cli.Context) error { + managerAddr := c.String("manager-grpc-address") grpcAddr := c.String("grpc-address") httpAddr := c.String("http-address") diff --git a/cmd/blast/indexer_peers_info.go b/cmd/blast/indexer_cluster_info.go similarity index 96% rename from cmd/blast/indexer_peers_info.go rename to cmd/blast/indexer_cluster_info.go index f798ea7..3e8f1d8 100644 --- a/cmd/blast/indexer_peers_info.go +++ b/cmd/blast/indexer_cluster_info.go @@ -23,7 +23,7 @@ import ( "github.com/urfave/cli" ) -func indexerPeersInfo(c *cli.Context) error { +func indexerClusterInfo(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := 
indexer.NewGRPCClient(grpcAddr) diff --git a/cmd/blast/indexer_node_leave.go b/cmd/blast/indexer_cluster_leave.go similarity index 92% rename from cmd/blast/indexer_node_leave.go rename to cmd/blast/indexer_cluster_leave.go index 5255586..b0be2d9 100644 --- a/cmd/blast/indexer_node_leave.go +++ b/cmd/blast/indexer_cluster_leave.go @@ -22,8 +22,8 @@ import ( "github.com/urfave/cli" ) -func indexerNodeLeave(c *cli.Context) error { - clusterGrpcAddr := c.String("cluster-grpc-address") +func indexerClusterLeave(c *cli.Context) error { + clusterGrpcAddr := c.String("manager-grpc-address") shardId := c.String("shard-id") peerGrpcAddr := c.String("peer-grpc-address") diff --git a/cmd/blast/indexer_peers_watch.go b/cmd/blast/indexer_cluster_watch.go similarity index 95% rename from cmd/blast/indexer_peers_watch.go rename to cmd/blast/indexer_cluster_watch.go index 1169a9d..1a5097f 100644 --- a/cmd/blast/indexer_peers_watch.go +++ b/cmd/blast/indexer_cluster_watch.go @@ -27,7 +27,7 @@ import ( "github.com/urfave/cli" ) -func indexerPeersWatch(c *cli.Context) error { +func indexerClusterWatch(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := indexer.NewGRPCClient(grpcAddr) @@ -41,7 +41,7 @@ func indexerPeersWatch(c *cli.Context) error { } }() - err = indexerPeersInfo(c) + err = indexerClusterInfo(c) if err != nil { return err } diff --git a/cmd/blast/indexer_node_snapshot.go b/cmd/blast/indexer_snapshot.go similarity index 95% rename from cmd/blast/indexer_node_snapshot.go rename to cmd/blast/indexer_snapshot.go index d59ab92..c34459a 100644 --- a/cmd/blast/indexer_node_snapshot.go +++ b/cmd/blast/indexer_snapshot.go @@ -22,7 +22,7 @@ import ( "github.com/urfave/cli" ) -func indexerNodeSnapshot(c *cli.Context) error { +func indexerSnapshot(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := indexer.NewGRPCClient(grpcAddr) diff --git a/cmd/blast/indexer_node_start.go b/cmd/blast/indexer_start.go similarity index 97% rename 
from cmd/blast/indexer_node_start.go rename to cmd/blast/indexer_start.go index 387b57e..a716efe 100644 --- a/cmd/blast/indexer_node_start.go +++ b/cmd/blast/indexer_start.go @@ -27,8 +27,8 @@ import ( "github.com/urfave/cli" ) -func indexerNodeStart(c *cli.Context) error { - clusterGRPCAddr := c.String("cluster-grpc-address") +func indexerStart(c *cli.Context) error { + clusterGRPCAddr := c.String("manager-grpc-address") shardId := c.String("shard-id") peerGRPCAddr := c.String("peer-grpc-address") diff --git a/cmd/blast/main.go b/cmd/blast/main.go index c5725e1..cdf357e 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -38,179 +38,179 @@ func main() { app.Commands = []cli.Command{ { - Name: "cluster", - Usage: "Command for blast cluster", + Name: "manager", + Usage: "Command for blast manager", Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast manager", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + EnvVar: "BLAST_MANAGER_PEER_GRPC_ADDRESS", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5100", + EnvVar: "BLAST_MANAGER_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8100", + EnvVar: "BLAST_MANAGER_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + EnvVar: "BLAST_MANAGER_NODE_ID", + Usage: "Unique ID to identify the node", + }, + cli.StringFlag{ + Name: "node-address", + Value: ":2100", + EnvVar: "BLAST_MANAGER_NODE_ADDRESS", + Usage: "The address that should be bound to for internal cluster communications", + }, + cli.StringFlag{ + Name: "data-dir", + Value: "/tmp/blast/indexer", + EnvVar: "BLAST_MANAGER_DATA_DIR", + Usage: "A data directory for the node to store state", + }, + cli.StringFlag{ + Name: "raft-storage-type", + Value: "boltdb", + EnvVar: "BLAST_MANAGER_RAFT_STORAGE_TYPE", + Usage: 
"Storage type of the database that stores the state", + }, + cli.StringFlag{ + Name: "index-mapping-file", + Value: "", + EnvVar: "BLAST_MANAGER_INDEX_MAPPING_FILE", + Usage: "An index mapping file to use", + }, + cli.StringFlag{ + Name: "index-type", + Value: bleve.Config.DefaultIndexType, + EnvVar: "BLAST_MANAGER_INDEX_TYPE", + Usage: "An index type to use", + }, + cli.StringFlag{ + Name: "index-storage-type", + Value: bleve.Config.DefaultKVStore, + EnvVar: "BLAST_MANAGER_INDEX_STORAGE_TYPE", + Usage: "An index storage type to use", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_MANAGER_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_MANAGER_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_MANAGER_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: "BLAST_MANAGER_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_MANAGER_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_MANAGER_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: "BLAST_MANAGER_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_MANAGER_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: 
"BLAST_MANAGER_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_MANAGER_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_MANAGER_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_MANAGER_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access log", + }, + }, + Action: managerStart, + }, { Name: "node", - Usage: "Command for blast cluster node", + Usage: "Command for blast manager node", Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast cluster node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_CLUSTER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - EnvVar: "BLAST_CLUSTER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8100", - EnvVar: "BLAST_CLUSTER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_CLUSTER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2100", - EnvVar: "BLAST_CLUSTER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster 
communications", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast/indexer", - EnvVar: "BLAST_CLUSTER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: "BLAST_CLUSTER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_CLUSTER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_CLUSTER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_CLUSTER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_CLUSTER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_CLUSTER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_CLUSTER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_CLUSTER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_CLUSTER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_CLUSTER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_CLUSTER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_CLUSTER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: 
"BLAST_CLUSTER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_CLUSTER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_CLUSTER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_CLUSTER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_CLUSTER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_CLUSTER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_CLUSTER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: clusterNodeStart, - }, { Name: "info", Usage: "Get node information", @@ -231,28 +231,11 @@ func main() { Usage: "The gRPC address of the node for which to retrieve the node information", }, }, - Action: clusterNodeInfo, - }, - { - Name: "leave", - Usage: "Leave the node from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "The gRPC listen address", - }, - }, - Action: clusterNodeLeave, + Action: managerNodeInfo, }, { Name: "health", - Usage: "Health check", + Usage: "Health check the node", 
Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", @@ -268,29 +251,17 @@ func main() { Usage: "Readiness probe", }, }, - Action: clusterNodeHealth, - }, - { - Name: "snapshot", - Usage: "Snapshot", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: "", - Usage: "The gRPC listen address", - }, - }, - Action: clusterNodeSnapshot, + Action: managerNodeHealth, }, }, }, { - Name: "peers", - Usage: "Command for blast cluster peers", + Name: "cluster", + Usage: "Command for blast manager cluster", Subcommands: []cli.Command{ { Name: "info", - Usage: "Get peers", + Usage: "Get cluster information", Flags: []cli.Flag{ //cli.StringFlag{ // Name: "cluster-grpc-address", @@ -318,7 +289,7 @@ func main() { Usage: "The gRPC address of the node for which to retrieve the node information", }, }, - Action: clusterPeersInfo, + Action: managerClusterInfo, }, { Name: "watch", @@ -350,7 +321,24 @@ func main() { Usage: "The gRPC address of the node for which to retrieve the node information", }, }, - Action: clusterPeersWatch, + Action: managerClusterWatch, + }, + { + Name: "leave", + Usage: "Leave the manager from the cluster", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: managerClusterLeave, }, }, }, @@ -365,7 +353,7 @@ func main() { }, }, ArgsUsage: "[key]", - Action: clusterGet, + Action: managerGet, }, { Name: "set", @@ -383,7 +371,7 @@ func main() { }, }, ArgsUsage: "[key] [value]", - Action: clusterSet, + Action: managerSet, }, { Name: "delete", @@ -396,7 +384,7 @@ func main() { }, }, ArgsUsage: "[key]", - Action: clusterDelete, + Action: managerDelete, }, { Name: "watch", @@ -409,7 +397,19 @@ func main() { }, }, ArgsUsage: "[key]", - Action: clusterWatch, + Action: managerWatch, + }, + { + Name: "snapshot", + Usage: 
"Snapshot the data", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: managerSnapshot, }, }, }, @@ -417,188 +417,188 @@ func main() { Name: "indexer", Usage: "Command for blast indexer", Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast indexer", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "manager-grpc-address", + Value: "", + EnvVar: "BLAST_INDEXER_MANAGER_GRPC_ADDRESS", + Usage: "The gRPC address of the existing cluster manager to be joined", + }, + cli.StringFlag{ + Name: "shard-id", + Value: "", + EnvVar: "BLAST_INDEXER_SHARD_ID", + Usage: "Shard ID registered in the existing cluster to be joined", + }, + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + EnvVar: "BLAST_INDEXER_PEER_GRPC_ADDRESS", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5000", + EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8000", + EnvVar: "BLAST_INDEXER_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + EnvVar: "BLAST_INDEXER_NODE_ID", + Usage: "Unique ID to identify the node", + }, + cli.StringFlag{ + Name: "node-address", + Value: ":2000", + EnvVar: "BLAST_INDEXER_NODE_ADDRESS", + Usage: "The address that should be bound to for internal cluster communications", + }, + cli.StringFlag{ + Name: "data-dir", + Value: "/tmp/blast/indexer", + EnvVar: "BLAST_INDEXER_DATA_DIR", + Usage: "A data directory for the node to store state", + }, + cli.StringFlag{ + Name: "raft-storage-type", + Value: "boltdb", + EnvVar: "BLAST_INDEXER_RAFT_STORAGE_TYPE", + Usage: "Storage type of the database that stores the state", + }, + cli.StringFlag{ + Name: "index-mapping-file", + Value: "", + EnvVar: "BLAST_INDEXER_INDEX_MAPPING_FILE", + Usage: "An index mapping 
file to use", + }, + cli.StringFlag{ + Name: "index-type", + Value: bleve.Config.DefaultIndexType, + EnvVar: "BLAST_INDEXER_INDEX_TYPE", + Usage: "An index type to use", + }, + cli.StringFlag{ + Name: "index-storage-type", + Value: bleve.Config.DefaultKVStore, + EnvVar: "BLAST_INDEXER_INDEX_STORAGE_TYPE", + Usage: "An index storage type to use", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_INDEXER_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_INDEXER_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: "BLAST_INDEXER_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_INDEXER_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + 
}, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_INDEXER_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_INDEXER_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access log", + }, + }, + Action: indexerStart, + }, { Name: "node", Usage: "Command for blast indexer node", Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast indexer node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "cluster-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_CLUSTER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - EnvVar: "BLAST_INDEXER_SHARD_ID", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8000", - EnvVar: "BLAST_INDEXER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_INDEXER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2000", - EnvVar: 
"BLAST_INDEXER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster communications", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast/indexer", - EnvVar: "BLAST_INDEXER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: "BLAST_INDEXER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_INDEXER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_INDEXER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_INDEXER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_INDEXER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_INDEXER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_INDEXER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_GRPC_LOG_FILE", - Usage: 
"gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_INDEXER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_INDEXER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: indexerNodeStart, - }, { Name: "info", Usage: "Get node information", @@ -631,41 +631,9 @@ func main() { }, Action: indexerNodeInfo, }, - { - Name: "leave", - Usage: "Leave the node from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "cluster-grpc-address", - Value: "", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in 
the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: "", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "Node ID to delete", - }, - }, - Action: indexerNodeLeave, - }, { Name: "health", - Usage: "Health check", + Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", @@ -683,27 +651,15 @@ func main() { }, Action: indexerNodeHealth, }, - { - Name: "snapshot", - Usage: "Snapshot", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: "", - Usage: "The gRPC listen address", - }, - }, - Action: indexerNodeSnapshot, - }, }, }, { - Name: "peers", - Usage: "Command for blast indexer peers", + Name: "cluster", + Usage: "Command for blast indexer cluster", Subcommands: []cli.Command{ { Name: "info", - Usage: "Get peers", + Usage: "Get cluster information", Flags: []cli.Flag{ //cli.StringFlag{ // Name: "cluster-grpc-address", @@ -731,11 +687,11 @@ func main() { Usage: "The gRPC address of the node for which to retrieve the node information", }, }, - Action: indexerPeersInfo, + Action: indexerClusterInfo, }, { Name: "watch", - Usage: "Watch peers", + Usage: "Watch cluster", Flags: []cli.Flag{ //cli.StringFlag{ // Name: "cluster-grpc-address", @@ -763,7 +719,39 @@ func main() { Usage: "The gRPC address of the node for which to retrieve the node information", }, }, - Action: indexerPeersWatch, + Action: indexerClusterWatch, + }, + { + Name: "leave", + Usage: "Leave the indexer from the cluster", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "manager-grpc-address", + Value: "", + Usage: "The gRPC address of the existing cluster node to be joined", + }, + cli.StringFlag{ + Name: "shard-id", + Value: "", + Usage: "Shard ID registered in the existing cluster to be joined", + }, + cli.StringFlag{ + Name: "peer-grpc-address", + Value: "", + Usage: "The gRPC address of the peer node that exists in the cluster to be joined", + }, + cli.StringFlag{ + 
Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "node-id", + Value: "", + Usage: "Node ID to delete", + }, + }, + Action: indexerClusterLeave, }, }, }, @@ -782,7 +770,7 @@ func main() { Usage: "Document ID list", }, }, - ArgsUsage: "[document IDs]", + ArgsUsage: "[document ID]", Action: indexerGet, }, { @@ -822,7 +810,7 @@ func main() { Usage: "Document ID list", }, }, - ArgsUsage: "[document IDs]", + ArgsUsage: "[document ID]", Action: indexerDelete, }, { @@ -843,143 +831,155 @@ func main() { ArgsUsage: "[search request]", Action: indexerSearch, }, + { + Name: "snapshot", + Usage: "Snapshot", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "grpc-address", + Value: "", + Usage: "The gRPC listen address", + }, + }, + Action: indexerSnapshot, + }, }, }, { - Name: "distributor", - Usage: "Command for blast distributor", + Name: "dispatcher", + Usage: "Command for blast dispatcher", Subcommands: []cli.Command{ + { + Name: "start", + Usage: "Start blast dispatcher", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "manager-grpc-address", + Value: ":5100", + EnvVar: "BLAST_DISPATCHER_CLUSTER_GRPC_ADDRESS", + Usage: "The gRPC address of the existing cluster node to be joined", + }, + cli.StringFlag{ + Name: "grpc-address", + Value: ":5200", + EnvVar: "BLAST_DISPATCHER_GRPC_ADDRESS", + Usage: "The gRPC listen address", + }, + cli.StringFlag{ + Name: "http-address", + Value: ":8200", + EnvVar: "BLAST_DISPATCHER_HTTP_ADDRESS", + Usage: "HTTP listen address", + }, + cli.StringFlag{ + Name: "log-level", + Value: "INFO", + EnvVar: "BLAST_DISPATCHER_LOG_LEVEL", + Usage: "Log level", + }, + cli.StringFlag{ + Name: "log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISPATCHER_LOG_FILE", + Usage: "Log file", + }, + cli.IntFlag{ + Name: "log-max-size", + Value: 500, + EnvVar: "BLAST_DISPATCHER_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "log-max-backups", + Value: 3, + EnvVar: 
"BLAST_DISPATCHER_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "log-max-age", + Value: 30, + EnvVar: "BLAST_DISPATCHER_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "log-compress", + EnvVar: "BLAST_DISPATCHER_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "grpc-log-level", + Value: "WARN", + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_LEVEL", + Usage: "gRPC log level", + }, + cli.StringFlag{ + Name: "grpc-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_FILE", + Usage: "gRPC log file", + }, + cli.IntFlag{ + Name: "grpc-log-max-size", + Value: 500, + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_SIZE", + Usage: "Max size of a log file (megabytes)", + }, + cli.IntFlag{ + Name: "grpc-log-max-backups", + Value: 3, + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_BACKUPS", + Usage: "Max backup count of log files", + }, + cli.IntFlag{ + Name: "grpc-log-max-age", + Value: 30, + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_AGE", + Usage: "Max age of a log file (days)", + }, + cli.BoolFlag{ + Name: "grpc-log-compress", + EnvVar: "BLAST_DISPATCHER_GRPC_LOG_COMPRESS", + Usage: "Compress a log file", + }, + cli.StringFlag{ + Name: "http-log-file", + Value: os.Stderr.Name(), + EnvVar: "BLAST_DISPATCHER_HTTP_LOG_FILE", + Usage: "HTTP access log file", + }, + cli.IntFlag{ + Name: "http-log-max-size", + Value: 500, + EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_SIZE", + Usage: "Max size of a HTTP access log file (megabytes)", + }, + cli.IntFlag{ + Name: "http-log-max-backups", + Value: 3, + EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_BACKUPS", + Usage: "Max backup count of HTTP access log files", + }, + cli.IntFlag{ + Name: "http-log-max-age", + Value: 30, + EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_AGE", + Usage: "Max age of a HTTP access log file (days)", + }, + cli.BoolFlag{ + Name: "http-log-compress", + EnvVar: "BLAST_DISPATCHER_HTTP_LOG_COMPRESS", + Usage: "Compress a HTTP access 
log", + }, + }, + Action: dispatcherStart, + }, { Name: "node", - Usage: "Command for blast distributor node", + Usage: "Command for blast dispatcher node", Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast distributor node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "cluster-grpc-address", - Value: ":5100", - EnvVar: "BLAST_DISTRIBUTOR_CLUSTER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - EnvVar: "BLAST_DISTRIBUTOR_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8200", - EnvVar: "BLAST_DISTRIBUTOR_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_DISTRIBUTOR_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISTRIBUTOR_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_DISTRIBUTOR_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_DISTRIBUTOR_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file 
(megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_DISTRIBUTOR_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_DISTRIBUTOR_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: distributorNodeStart, - }, { Name: "health", - Usage: "Health check", + Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", @@ -995,7 +995,7 @@ func main() { Usage: "Readiness probe", }, }, - Action: distributorNodeHealth, + Action: dispatcherNodeHealth, }, }, }, @@ -1015,7 +1015,7 @@ func main() { }, }, ArgsUsage: "[document IDs]", - Action: distributorGet, + Action: dispatcherGet, }, { Name: "index", @@ -1037,7 +1037,7 @@ func main() { }, }, ArgsUsage: "[document ID] [document fields]", - Action: distributorIndex, + Action: dispatcherIndex, }, { Name: "delete", @@ -1055,7 +1055,7 @@ func main() { }, }, ArgsUsage: "[document IDs]", - Action: distributorDelete, + Action: 
dispatcherDelete, }, { Name: "search", @@ -1073,7 +1073,7 @@ func main() { }, }, ArgsUsage: "[search request]", - Action: distributorSearch, + Action: dispatcherSearch, }, }, }, diff --git a/cmd/blast/cluster_peers_info.go b/cmd/blast/manager_cluster_info.go similarity index 96% rename from cmd/blast/cluster_peers_info.go rename to cmd/blast/manager_cluster_info.go index 6d4ea48..8e4589c 100644 --- a/cmd/blast/cluster_peers_info.go +++ b/cmd/blast/manager_cluster_info.go @@ -23,7 +23,7 @@ import ( "github.com/urfave/cli" ) -func clusterPeersInfo(c *cli.Context) error { +func managerClusterInfo(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := manager.NewGRPCClient(grpcAddr) diff --git a/cmd/blast/cluster_node_leave.go b/cmd/blast/manager_cluster_leave.go similarity index 81% rename from cmd/blast/cluster_node_leave.go rename to cmd/blast/manager_cluster_leave.go index 8250238..5082304 100644 --- a/cmd/blast/cluster_node_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -22,14 +22,10 @@ import ( "github.com/urfave/cli" ) -func clusterNodeLeave(c *cli.Context) error { - clusterGrpcAddr := c.String("cluster-grpc-address") - shardId := c.String("shard-id") +func managerClusterLeave(c *cli.Context) error { peerGrpcAddr := c.String("peer-grpc-address") - if clusterGrpcAddr != "" && shardId != "" { - // get grpc address of leader node - } else if peerGrpcAddr != "" { + if peerGrpcAddr != "" { // get grpc address of leader node } diff --git a/cmd/blast/cluster_peers_watch.go b/cmd/blast/manager_cluster_watch.go similarity index 95% rename from cmd/blast/cluster_peers_watch.go rename to cmd/blast/manager_cluster_watch.go index 59d5d35..b8d4277 100644 --- a/cmd/blast/cluster_peers_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -27,7 +27,7 @@ import ( "github.com/urfave/cli" ) -func clusterPeersWatch(c *cli.Context) error { +func managerClusterWatch(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := 
manager.NewGRPCClient(grpcAddr) @@ -41,7 +41,7 @@ func clusterPeersWatch(c *cli.Context) error { } }() - err = clusterPeersInfo(c) + err = managerClusterInfo(c) if err != nil { return err } diff --git a/cmd/blast/cluster_delete.go b/cmd/blast/manager_delete.go similarity index 96% rename from cmd/blast/cluster_delete.go rename to cmd/blast/manager_delete.go index 600c70f..60c273b 100644 --- a/cmd/blast/cluster_delete.go +++ b/cmd/blast/manager_delete.go @@ -23,7 +23,7 @@ import ( "github.com/urfave/cli" ) -func clusterDelete(c *cli.Context) error { +func managerDelete(c *cli.Context) error { grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/cmd/blast/cluster_get.go b/cmd/blast/manager_get.go similarity index 96% rename from cmd/blast/cluster_get.go rename to cmd/blast/manager_get.go index fff65e3..46f0228 100644 --- a/cmd/blast/cluster_get.go +++ b/cmd/blast/manager_get.go @@ -23,7 +23,7 @@ import ( "github.com/urfave/cli" ) -func clusterGet(c *cli.Context) error { +func managerGet(c *cli.Context) error { grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/cmd/blast/cluster_node_health.go b/cmd/blast/manager_node_health.go similarity index 97% rename from cmd/blast/cluster_node_health.go rename to cmd/blast/manager_node_health.go index 39294b7..0f4d863 100644 --- a/cmd/blast/cluster_node_health.go +++ b/cmd/blast/manager_node_health.go @@ -22,7 +22,7 @@ import ( "github.com/urfave/cli" ) -func clusterNodeHealth(c *cli.Context) error { +func managerNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") liveness := c.Bool("liveness") readiness := c.Bool("readiness") diff --git a/cmd/blast/cluster_node_info.go b/cmd/blast/manager_node_info.go similarity index 83% rename from cmd/blast/cluster_node_info.go rename to cmd/blast/manager_node_info.go index 517c72e..f399b30 100644 --- a/cmd/blast/cluster_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -23,21 +23,15 @@ import ( "github.com/urfave/cli" ) -func 
clusterNodeInfo(c *cli.Context) error { - clusterGrpcAddr := c.String("cluster-grpc-address") - shardId := c.String("shard-id") +func managerNodeInfo(c *cli.Context) error { peerGrpcAddr := c.String("peer-grpc-address") + nodeId := c.String("node-id") + grpcAddr := c.String("grpc-address") - if clusterGrpcAddr != "" && shardId != "" { - - } else if peerGrpcAddr != "" { + if peerGrpcAddr != "" { } - grpcAddr := c.String("grpc-address") - - nodeId := c.Args().Get(0) - client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err diff --git a/cmd/blast/cluster_set.go b/cmd/blast/manager_set.go similarity index 97% rename from cmd/blast/cluster_set.go rename to cmd/blast/manager_set.go index 55e51f0..86542bc 100644 --- a/cmd/blast/cluster_set.go +++ b/cmd/blast/manager_set.go @@ -24,7 +24,7 @@ import ( "github.com/urfave/cli" ) -func clusterSet(c *cli.Context) error { +func managerSet(c *cli.Context) error { grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/cmd/blast/cluster_node_snapshot.go b/cmd/blast/manager_snapshot.go similarity index 95% rename from cmd/blast/cluster_node_snapshot.go rename to cmd/blast/manager_snapshot.go index ce9d62f..8dd9b71 100644 --- a/cmd/blast/cluster_node_snapshot.go +++ b/cmd/blast/manager_snapshot.go @@ -22,7 +22,7 @@ import ( "github.com/urfave/cli" ) -func clusterNodeSnapshot(c *cli.Context) error { +func managerSnapshot(c *cli.Context) error { grpcAddr := c.String("grpc-address") client, err := manager.NewGRPCClient(grpcAddr) diff --git a/cmd/blast/cluster_node_start.go b/cmd/blast/manager_start.go similarity index 98% rename from cmd/blast/cluster_node_start.go rename to cmd/blast/manager_start.go index feb56c2..e53a707 100644 --- a/cmd/blast/cluster_node_start.go +++ b/cmd/blast/manager_start.go @@ -27,7 +27,7 @@ import ( "github.com/urfave/cli" ) -func clusterNodeStart(c *cli.Context) error { +func managerStart(c *cli.Context) error { peerGrpcAddr := c.String("peer-grpc-address") grpcAddr := 
c.String("grpc-address") diff --git a/cmd/blast/cluster_watch.go b/cmd/blast/manager_watch.go similarity index 97% rename from cmd/blast/cluster_watch.go rename to cmd/blast/manager_watch.go index 71d2dc2..024cbbb 100644 --- a/cmd/blast/cluster_watch.go +++ b/cmd/blast/manager_watch.go @@ -27,7 +27,7 @@ import ( "github.com/urfave/cli" ) -func clusterWatch(c *cli.Context) error { +func managerWatch(c *cli.Context) error { grpcAddr := c.String("grpc-address") key := c.Args().Get(0) diff --git a/example/wiki_bulk_index.txt b/example/wiki_bulk_index.jsonl similarity index 100% rename from example/wiki_bulk_index.txt rename to example/wiki_bulk_index.jsonl From ae406b92a101cf3eaa47a1047b7c018fd2209be6 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Sat, 27 Jul 2019 02:18:23 +0900 Subject: [PATCH 06/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index ef4da28..3731fd2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- New CLI #82 - Split protobuf into components #84 +- Change subcommands #85 ## [v0.7.1] - 2019-07-18 From dafa34b694178fd3250e0583297e0864cb5c6199 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 29 Jul 2019 14:04:31 +0900 Subject: [PATCH 07/76] Update protobuf (#86) --- cmd/blast/main.go | 10 +- cmd/blast/manager_cluster_info.go | 2 +- cmd/blast/manager_cluster_leave.go | 2 +- cmd/blast/manager_cluster_watch.go | 2 +- cmd/blast/manager_delete.go | 2 +- cmd/blast/manager_get.go | 2 +- cmd/blast/manager_node_health.go | 42 +- cmd/blast/manager_node_info.go | 2 +- cmd/blast/manager_set.go | 2 +- cmd/blast/manager_watch.go | 2 +- dispatcher/grpc_service.go | 10 +- dispatcher/server_test.go | 2 +- indexer/grpc_service.go | 6 +- indexer/server.go | 4 +- manager/grpc_client.go | 96 ++- manager/grpc_service.go | 84 ++- manager/http_router.go | 6 +- manager/server.go | 2 +- manager/server_test.go | 390 ++++------- protobuf/management/management.pb.go | 994 +++++++++++++-------------- protobuf/management/management.proto | 78 +-- 21 files changed, 807 insertions(+), 933 deletions(-) diff --git a/cmd/blast/main.go b/cmd/blast/main.go index cdf357e..42adee0 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -234,14 +234,18 @@ func main() { Action: managerNodeInfo, }, { - Name: "health", + Name: "healthcheck", Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, + cli.BoolFlag{ + Name: "healthiness", + Usage: "healthiness probe", + }, cli.BoolFlag{ Name: "liveness", Usage: "Liveness probe", @@ -251,7 +255,7 @@ func main() { Usage: "Readiness probe", }, }, - Action: managerNodeHealth, + Action: managerNodeHealthCheck, }, }, }, diff --git a/cmd/blast/manager_cluster_info.go b/cmd/blast/manager_cluster_info.go index 8e4589c..2ccc08d 100644 --- a/cmd/blast/manager_cluster_info.go +++ b/cmd/blast/manager_cluster_info.go @@ -37,7 +37,7 @@ func managerClusterInfo(c 
*cli.Context) error { } }() - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { return err } diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go index 5082304..b50a277 100644 --- a/cmd/blast/manager_cluster_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -43,7 +43,7 @@ func managerClusterLeave(c *cli.Context) error { } }() - err = client.DeleteNode(nodeId) + err = client.ClusterLeave(nodeId) if err != nil { return err } diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index b8d4277..8bef44b 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -46,7 +46,7 @@ func managerClusterWatch(c *cli.Context) error { return err } - watchClient, err := client.WatchCluster() + watchClient, err := client.ClusterWatch() if err != nil { return err } diff --git a/cmd/blast/manager_delete.go b/cmd/blast/manager_delete.go index 60c273b..e6c41e6 100644 --- a/cmd/blast/manager_delete.go +++ b/cmd/blast/manager_delete.go @@ -43,7 +43,7 @@ func managerDelete(c *cli.Context) error { } }() - err = client.DeleteValue(key) + err = client.Delete(key) if err != nil { return err } diff --git a/cmd/blast/manager_get.go b/cmd/blast/manager_get.go index 46f0228..f0e3fe7 100644 --- a/cmd/blast/manager_get.go +++ b/cmd/blast/manager_get.go @@ -39,7 +39,7 @@ func managerGet(c *cli.Context) error { } }() - value, err := client.GetValue(key) + value, err := client.Get(key) if err != nil { return err } diff --git a/cmd/blast/manager_node_health.go b/cmd/blast/manager_node_health.go index 0f4d863..9967f91 100644 --- a/cmd/blast/manager_node_health.go +++ b/cmd/blast/manager_node_health.go @@ -19,11 +19,13 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) -func managerNodeHealth(c *cli.Context) error { +func managerNodeHealthCheck(c *cli.Context) error { grpcAddr := 
c.String("grpc-address") + healthiness := c.Bool("healthiness") liveness := c.Bool("liveness") readiness := c.Bool("readiness") @@ -38,34 +40,30 @@ func managerNodeHealth(c *cli.Context) error { } }() - if !liveness && !readiness { - LivenessState, err := client.LivenessProbe() + var state string + if healthiness { + state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { - return err + state = management.NodeHealthCheckResponse_UNHEALTHY.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) - - readinessState, err := client.ReadinessProbe() + } else if liveness { + state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { - return err + state = management.NodeHealthCheckResponse_DEAD.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) - } else { - if liveness { - state, err := client.LivenessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else if readiness { + state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + if err != nil { + state = management.NodeHealthCheckResponse_NOT_READY.String() } - if readiness { - state, err := client.ReadinessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else { + state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + if err != nil { + state = management.NodeHealthCheckResponse_UNHEALTHY.String() } } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + return nil } diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index f399b30..c7a206f 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -43,7 +43,7 @@ func managerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.GetNode(nodeId) + metadata, err := 
client.NodeInfo(nodeId) if err != nil { return err } diff --git a/cmd/blast/manager_set.go b/cmd/blast/manager_set.go index 86542bc..02c3fa1 100644 --- a/cmd/blast/manager_set.go +++ b/cmd/blast/manager_set.go @@ -61,7 +61,7 @@ func managerSet(c *cli.Context) error { } }() - err = client.SetValue(key, value) + err = client.Set(key, value) if err != nil { return err } diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go index 024cbbb..273927b 100644 --- a/cmd/blast/manager_watch.go +++ b/cmd/blast/manager_watch.go @@ -43,7 +43,7 @@ func managerWatch(c *cli.Context) error { } }() - watchClient, err := client.WatchStore(key) + watchClient, err := client.Watch(key) if err != nil { return err } diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 6f85872..df7d1ad 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -137,7 +137,7 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interfa return nil, err } - managers, err := client.GetCluster() + managers, err := client.ClusterInfo() if err != nil { s.logger.Error(err.Error()) return nil, err @@ -207,7 +207,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } // create stream - stream, err := client.WatchCluster() + stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) continue @@ -360,7 +360,7 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } // get initial indexers - clusters, err := client.GetValue("/cluster_config/clusters/") + clusters, err := client.Get("/cluster_config/clusters/") if err != nil { s.logger.Error(err.Error()) } @@ -426,7 +426,7 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { continue } - stream, err := client.WatchStore("/cluster_config/clusters/") + stream, err := client.Watch("/cluster_config/clusters/") if err != nil { s.logger.Error(err.Error()) continue @@ -443,7 +443,7 @@ func (s *GRPCService) 
startUpdateIndexers(checkInterval time.Duration) { } s.logger.Debug("data has changed", zap.String("key", resp.Key)) - cluster, err := client.GetValue("/cluster_config/clusters/") + cluster, err := client.Get("/cluster_config/clusters/") if err != nil { s.logger.Error(err.Error()) continue diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index a6f7d86..c762b2d 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -127,7 +127,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - managerCluster1, err := managerClient1.GetCluster() + managerCluster1, err := managerClient1.ClusterInfo() if err != nil { t.Fatalf("%v", err) } diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 0b56280..20971a7 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -147,7 +147,7 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interfa return nil, err } - managers, err := client.GetCluster() + managers, err := client.ClusterInfo() if err != nil { s.logger.Error(err.Error()) return nil, err @@ -216,7 +216,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { continue } - stream, err := client.WatchCluster() + stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) continue @@ -504,7 +504,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { if err != nil { s.logger.Error(err.Error()) } - err = client.SetValue(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId), cluster) + err = client.Set(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId), cluster) if err != nil { s.logger.Error(err.Error()) } diff --git a/indexer/server.go b/indexer/server.go index b60d444..8813a48 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -69,7 +69,7 @@ func (s *Server) Start() { return } - clusterIntr, err := 
mc.GetValue(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId)) + clusterIntr, err := mc.Get(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId)) if err != nil && err != errors.ErrNotFound { s.logger.Fatal(err.Error()) return @@ -122,7 +122,7 @@ func (s *Server) Start() { } s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - value, err := mc.GetValue("/index_config") + value, err := mc.Get("/index_config") if err != nil { s.logger.Fatal(err.Error()) return diff --git a/manager/grpc_client.go b/manager/grpc_client.go index aecc95a..cbb10c8 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -95,34 +95,58 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +//func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { +// resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) +// if err != nil { +// st, _ := status.FromError(err) +// +// return management.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) +// } +// +// return resp.State.String(), nil +//} - return management.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) +//func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { +// resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) +// if err != nil { +// st, _ := status.FromError(err) +// +// return management.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) +// } +// +// return resp.State.String(), nil +//} + +func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { + req := &management.NodeHealthCheckRequest{} + + switch probe { + case management.NodeHealthCheckRequest_HEALTHINESS.String(): + req.Probe = 
management.NodeHealthCheckRequest_HEALTHINESS + case management.NodeHealthCheckRequest_LIVENESS.String(): + req.Probe = management.NodeHealthCheckRequest_LIVENESS + case management.NodeHealthCheckRequest_READINESS.String(): + req.Probe = management.NodeHealthCheckRequest_READINESS + default: + req.Probe = management.NodeHealthCheckRequest_HEALTHINESS } - return resp.State.String(), nil -} - -func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - return management.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + return management.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) } return resp.State.String(), nil } -func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &management.GetNodeRequest{ +func (c *GRPCClient) NodeInfo(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { + req := &management.NodeInfoRequest{ Id: id, } - resp, err := c.client.GetNode(c.ctx, req, opts...) + resp, err := c.client.NodeInfo(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) @@ -140,19 +164,19 @@ func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]int return node, nil } -func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { +func (c *GRPCClient) ClusterJoin(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { nodeConfigAny := &any.Any{} err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) if err != nil { return err } - req := &management.SetNodeRequest{ + req := &management.ClusterJoinRequest{ Id: id, NodeConfig: nodeConfigAny, } - _, err = c.client.SetNode(c.ctx, req, opts...) + _, err = c.client.ClusterJoin(c.ctx, req, opts...) 
if err != nil { return err } @@ -160,12 +184,12 @@ func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts return nil } -func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { - req := &management.DeleteNodeRequest{ +func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { + req := &management.ClusterLeaveRequest{ Id: id, } - _, err := c.client.DeleteNode(c.ctx, req, opts...) + _, err := c.client.ClusterLeave(c.ctx, req, opts...) if err != nil { return err } @@ -173,8 +197,8 @@ func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { return nil } -func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) +func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (map[string]interface{}, error) { + resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) @@ -187,10 +211,10 @@ func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{} return cluster, nil } -func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (management.Management_WatchClusterClient, error) { +func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { req := &empty.Empty{} - watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) + watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) 
if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) @@ -199,12 +223,12 @@ func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (management.Managemen return watchClient, nil } -func (c *GRPCClient) GetValue(key string, opts ...grpc.CallOption) (interface{}, error) { - req := &management.GetValueRequest{ +func (c *GRPCClient) Get(key string, opts ...grpc.CallOption) (interface{}, error) { + req := &management.GetRequest{ Key: key, } - resp, err := c.client.GetValue(c.ctx, req, opts...) + resp, err := c.client.Get(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) @@ -221,19 +245,19 @@ func (c *GRPCClient) GetValue(key string, opts ...grpc.CallOption) (interface{}, return value, nil } -func (c *GRPCClient) SetValue(key string, value interface{}, opts ...grpc.CallOption) error { +func (c *GRPCClient) Set(key string, value interface{}, opts ...grpc.CallOption) error { valueAny := &any.Any{} err := protobuf.UnmarshalAny(value, valueAny) if err != nil { return err } - req := &management.SetValueRequest{ + req := &management.SetRequest{ Key: key, Value: valueAny, } - _, err = c.client.SetValue(c.ctx, req, opts...) + _, err = c.client.Set(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) @@ -248,12 +272,12 @@ func (c *GRPCClient) SetValue(key string, value interface{}, opts ...grpc.CallOp return nil } -func (c *GRPCClient) DeleteValue(key string, opts ...grpc.CallOption) error { - req := &management.DeleteValueRequest{ +func (c *GRPCClient) Delete(key string, opts ...grpc.CallOption) error { + req := &management.DeleteRequest{ Key: key, } - _, err := c.client.DeleteValue(c.ctx, req, opts...) + _, err := c.client.Delete(c.ctx, req, opts...) 
if err != nil { st, _ := status.FromError(err) @@ -268,12 +292,12 @@ func (c *GRPCClient) DeleteValue(key string, opts ...grpc.CallOption) error { return nil } -func (c *GRPCClient) WatchStore(key string, opts ...grpc.CallOption) (management.Management_WatchStoreClient, error) { - req := &management.WatchStoreRequest{ +func (c *GRPCClient) Watch(key string, opts ...grpc.CallOption) (management.Management_WatchClient, error) { + req := &management.WatchRequest{ Key: key, } - watchClient, err := c.client.WatchStore(c.ctx, req, opts...) + watchClient, err := c.client.Watch(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) diff --git a/manager/grpc_service.go b/manager/grpc_service.go index e15848b..f9254df 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -44,10 +44,10 @@ type GRPCService struct { peers map[string]interface{} peerClients map[string]*GRPCClient cluster map[string]interface{} - clusterChans map[chan management.GetClusterResponse]struct{} + clusterChans map[chan management.ClusterInfoResponse]struct{} clusterMutex sync.RWMutex - stateChans map[chan management.WatchStoreResponse]struct{} + stateChans map[chan management.WatchResponse]struct{} stateMutex sync.RWMutex } @@ -59,9 +59,9 @@ func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, e peers: make(map[string]interface{}, 0), peerClients: make(map[string]*GRPCClient, 0), cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan management.GetClusterResponse]struct{}), + clusterChans: make(map[chan management.ClusterInfoResponse]struct{}), - stateChans: make(map[chan management.WatchStoreResponse]struct{}), + stateChans: make(map[chan management.WatchResponse]struct{}), }, nil } @@ -220,7 +220,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // notify current cluster if !reflect.DeepEqual(s.cluster, cluster) { // convert to GetClusterResponse for channel output - 
clusterResp := &management.GetClusterResponse{} + clusterResp := &management.ClusterInfoResponse{} clusterAny := &any.Any{} err = protobuf.UnmarshalAny(cluster, clusterAny) if err != nil { @@ -247,11 +247,10 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("close all peer clients") for id, client := range s.peerClients { s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - _ = client.Close() - //err := client.Close() - //if err != nil { - // s.logger.Warn(err.Error()) - //} + err := client.Close() + if err != nil { + s.logger.Warn(err.Error()) + } } if s.updateClusterStopCh != nil { @@ -264,17 +263,16 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("the cluster update has been stopped") } -func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*management.LivenessProbeResponse, error) { - resp := &management.LivenessProbeResponse{ - State: management.LivenessProbeResponse_ALIVE, - } - - return resp, nil -} +func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *management.NodeHealthCheckRequest) (*management.NodeHealthCheckResponse, error) { + resp := &management.NodeHealthCheckResponse{} -func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*management.ReadinessProbeResponse, error) { - resp := &management.ReadinessProbeResponse{ - State: management.ReadinessProbeResponse_READY, + switch req.Probe { + case management.NodeHealthCheckRequest_HEALTHINESS: + resp.State = management.NodeHealthCheckResponse_HEALTHY + case management.NodeHealthCheckRequest_LIVENESS: + resp.State = management.NodeHealthCheckResponse_ALIVE + case management.NodeHealthCheckRequest_READINESS: + resp.State = management.NodeHealthCheckResponse_READY } return resp, nil @@ -296,7 +294,7 @@ func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { var err error if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = 
peerClient.GetNode(id) + nodeInfo, err = peerClient.NodeInfo(id) if err != nil { s.logger.Warn(err.Error()) nodeInfo = map[string]interface{}{ @@ -333,8 +331,8 @@ func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { return nodeInfo, nil } -func (s *GRPCService) GetNode(ctx context.Context, req *management.GetNodeRequest) (*management.GetNodeResponse, error) { - resp := &management.GetNodeResponse{} +func (s *GRPCService) NodeInfo(ctx context.Context, req *management.NodeInfoRequest) (*management.NodeInfoResponse, error) { + resp := &management.NodeInfoResponse{} nodeInfo, err := s.getNode(req.Id) if err != nil { @@ -379,7 +377,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro s.logger.Error(err.Error()) return err } - err = client.SetNode(id, nodeConfig) + err = client.ClusterJoin(id, nodeConfig) if err != nil { s.logger.Error(err.Error()) return err @@ -389,7 +387,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro return nil } -func (s *GRPCService) SetNode(ctx context.Context, req *management.SetNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) { resp := &empty.Empty{} ins, err := protobuf.MarshalAny(req.NodeConfig) @@ -423,7 +421,7 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.DeleteNode(id) + err = client.ClusterLeave(id) if err != nil { s.logger.Error(err.Error()) return err @@ -433,7 +431,7 @@ func (s *GRPCService) deleteNode(id string) error { return nil } -func (s *GRPCService) DeleteNode(ctx context.Context, req *management.DeleteNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterLeave(ctx context.Context, req *management.ClusterLeaveRequest) (*empty.Empty, error) { resp := &empty.Empty{} err := s.deleteNode(req.Id) @@ -470,8 +468,8 @@ func (s *GRPCService) getCluster() 
(map[string]interface{}, error) { return cluster, nil } -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*management.GetClusterResponse, error) { - resp := &management.GetClusterResponse{} +func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*management.ClusterInfoResponse, error) { + resp := &management.ClusterInfoResponse{} cluster, err := s.getCluster() if err != nil { @@ -491,8 +489,8 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*manage return resp, nil } -func (s *GRPCService) WatchCluster(req *empty.Empty, server management.Management_WatchClusterServer) error { - chans := make(chan management.GetClusterResponse) +func (s *GRPCService) ClusterWatch(req *empty.Empty, server management.Management_ClusterWatchServer) error { + chans := make(chan management.ClusterInfoResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} @@ -516,13 +514,13 @@ func (s *GRPCService) WatchCluster(req *empty.Empty, server management.Managemen return nil } -func (s *GRPCService) GetValue(ctx context.Context, req *management.GetValueRequest) (*management.GetValueResponse, error) { +func (s *GRPCService) Get(ctx context.Context, req *management.GetRequest) (*management.GetResponse, error) { s.stateMutex.RLock() defer func() { s.stateMutex.RUnlock() }() - resp := &management.GetValueResponse{} + resp := &management.GetResponse{} value, err := s.raftServer.GetValue(req.Key) if err != nil { @@ -547,7 +545,7 @@ func (s *GRPCService) GetValue(ctx context.Context, req *management.GetValueRequ return resp, nil } -func (s *GRPCService) SetValue(ctx context.Context, req *management.SetValueRequest) (*empty.Empty, error) { +func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*empty.Empty, error) { s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() @@ -579,7 +577,7 @@ func (s *GRPCService) SetValue(ctx context.Context, req *management.SetValueRequ s.logger.Error(err.Error()) 
return resp, status.Error(codes.Internal, err.Error()) } - err = client.SetValue(req.Key, value) + err = client.Set(req.Key, value) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -588,8 +586,8 @@ func (s *GRPCService) SetValue(ctx context.Context, req *management.SetValueRequ // notify for c := range s.stateChans { - c <- management.WatchStoreResponse{ - Command: management.WatchStoreResponse_SET, + c <- management.WatchResponse{ + Command: management.WatchResponse_SET, Key: req.Key, Value: req.Value, } @@ -598,7 +596,7 @@ func (s *GRPCService) SetValue(ctx context.Context, req *management.SetValueRequ return resp, nil } -func (s *GRPCService) DeleteValue(ctx context.Context, req *management.DeleteValueRequest) (*empty.Empty, error) { +func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) (*empty.Empty, error) { s.stateMutex.Lock() defer func() { s.stateMutex.Unlock() @@ -624,7 +622,7 @@ func (s *GRPCService) DeleteValue(ctx context.Context, req *management.DeleteVal s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - err = client.DeleteValue(req.Key) + err = client.Delete(req.Key) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -633,8 +631,8 @@ func (s *GRPCService) DeleteValue(ctx context.Context, req *management.DeleteVal // notify for c := range s.stateChans { - c <- management.WatchStoreResponse{ - Command: management.WatchStoreResponse_DELETE, + c <- management.WatchResponse{ + Command: management.WatchResponse_DELETE, Key: req.Key, } } @@ -642,8 +640,8 @@ func (s *GRPCService) DeleteValue(ctx context.Context, req *management.DeleteVal return resp, nil } -func (s *GRPCService) WatchStore(req *management.WatchStoreRequest, server management.Management_WatchStoreServer) error { - chans := make(chan management.WatchStoreResponse) +func (s *GRPCService) Watch(req *management.WatchRequest, server 
management.Management_WatchServer) error { + chans := make(chan management.WatchResponse) s.stateMutex.Lock() s.stateChans[chans] = struct{}{} diff --git a/manager/http_router.go b/manager/http_router.go index 5ccea8c..be7ca13 100644 --- a/manager/http_router.go +++ b/manager/http_router.go @@ -124,7 +124,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { key := vars["path"] - value, err := h.client.GetValue(key) + value, err := h.client.Get(key) if err != nil { switch err { case blasterrors.ErrNotFound: @@ -230,7 +230,7 @@ func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - err = h.client.SetValue(key, value) + err = h.client.Set(key, value) if err != nil { status = http.StatusInternalServerError @@ -274,7 +274,7 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { key := vars["path"] - err := h.client.DeleteValue(key) + err := h.client.Delete(key) if err != nil { status = http.StatusInternalServerError diff --git a/manager/server.go b/manager/server.go index 8dae8d5..b7ebeb1 100644 --- a/manager/server.go +++ b/manager/server.go @@ -136,7 +136,7 @@ func (s *Server) Start() { return } - err = client.SetNode(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + err = client.ClusterJoin(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/manager/server_test.go b/manager/server_test.go index 9aca154..b4bb963 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -75,7 +75,7 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) } -func TestServer_LivenessProbe(t *testing.T) { +func TestServer_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() // create logger @@ -133,82 +133,34 @@ func TestServer_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness - liveness, err := client.LivenessProbe() + // healthiness + healthiness, err := 
client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness := management.LivenessProbeResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) + expHealthiness := management.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness := healthiness + if expHealthiness != actHealthiness { + t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) } -} - -func TestServer_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() + // liveness + liveness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness := 
management.NodeHealthCheckResponse_ALIVE.String() + actLiveness := liveness + if expLiveness != actLiveness { + t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness - readiness, err := client.ReadinessProbe() + readiness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness := management.ReadinessProbeResponse_READY.String() + expReadiness := management.NodeHealthCheckResponse_READY.String() actReadiness := readiness if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) @@ -274,7 +226,7 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.GetNode(nodeConfig.NodeId) + nodeInfo, err := client.NodeInfo(nodeConfig.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -347,7 +299,7 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { t.Fatalf("%v", err) } @@ -426,7 +378,7 @@ func TestServer_GetIndexMapping(t *testing.T) { t.Fatalf("%v", err) } - actIntr, err := client.GetValue("index_config/index_mapping") + actIntr, err := client.Get("index_config/index_mapping") if err != nil { t.Fatalf("%v", err) } @@ -504,7 +456,7 @@ func TestServer_GetIndexType(t *testing.T) { t.Fatalf("%v", err) } - actIndexType, err := client.GetValue("index_config/index_type") + actIndexType, err := client.Get("index_config/index_type") if err != nil { t.Fatalf("%v", err) } @@ -577,7 +529,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { t.Fatalf("%v", err) } - actIndexStorageType, err := client.GetValue("index_config/index_storage_type") + actIndexStorageType, err := client.Get("index_config/index_storage_type") if err != nil { t.Fatalf("%v", err) } @@ -646,13 +598,13 @@ func TestServer_SetState(t *testing.T) { } // set value - err = client.SetValue("test/key1", "val1") + err = 
client.Set("test/key1", "val1") if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.GetValue("test/key1") + val1, err := client.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -725,13 +677,13 @@ func TestServer_GetState(t *testing.T) { } // set value - err = client.SetValue("test/key1", "val1") + err = client.Set("test/key1", "val1") if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.GetValue("test/key1") + val1, err := client.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -804,13 +756,13 @@ func TestServer_DeleteState(t *testing.T) { } // set value - err = client.SetValue("test/key1", "val1") + err = client.Set("test/key1", "val1") if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.GetValue("test/key1") + val1, err := client.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -824,12 +776,12 @@ func TestServer_DeleteState(t *testing.T) { } // delete value - err = client.DeleteValue("test/key1") + err = client.Delete("test/key1") if err != nil { t.Fatalf("%v", err) } - val1, err = client.GetValue("test/key1") + val1, err = client.Get("test/key1") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } @@ -839,7 +791,7 @@ func TestServer_DeleteState(t *testing.T) { } // delete non-existing data - err = client.DeleteValue("test/non-existing") + err = client.Delete("test/non-existing") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } @@ -926,7 +878,7 @@ func TestCluster_Start(t *testing.T) { time.Sleep(5 * time.Second) } -func TestCluster_LivenessProbe(t *testing.T) { +func TestCluster_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() // create logger @@ -1029,175 +981,105 @@ func TestCluster_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness check for manager1 - liveness1, err := client1.LivenessProbe() + // healthiness + healthiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { 
t.Fatalf("%v", err) } - expLiveness1 := management.LivenessProbeResponse_ALIVE.String() - actLiveness1 := liveness1 - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness1 := healthiness1 + if expHealthiness1 != actHealthiness1 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } - // liveness check for manager2 - liveness2, err := client2.LivenessProbe() + // liveness + liveness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := management.LivenessProbeResponse_ALIVE.String() - actLiveness2 := liveness2 - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + expLiveness1 := management.NodeHealthCheckResponse_ALIVE.String() + actLiveness1 := liveness1 + if expLiveness1 != actLiveness1 { + t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } - // liveness check for manager3 - liveness3, err := client3.LivenessProbe() + // readiness + readiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := management.LivenessProbeResponse_ALIVE.String() - actLiveness3 := liveness3 - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + expReadiness1 := management.NodeHealthCheckResponse_READY.String() + actReadiness1 := readiness1 + if expReadiness1 != actReadiness1 { + t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } -} -func TestCluster_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 
3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + // healthiness + healthiness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness2 := healthiness2 + if expHealthiness2 != actHealthiness2 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } - // start server1 - server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() + // liveness + liveness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { 
- _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness2 := management.NodeHealthCheckResponse_ALIVE.String() + actLiveness2 := liveness2 + if expLiveness2 != actLiveness2 { + t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() + // readiness + readiness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) + expReadiness2 := management.NodeHealthCheckResponse_READY.String() + actReadiness2 := readiness2 + if expReadiness2 != actReadiness2 { + t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } - // readiness check for manager1 - readiness1, err := client1.ReadinessProbe() + // healthiness + healthiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := management.ReadinessProbeResponse_READY.String() - actReadiness1 := readiness1 - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness3 := healthiness3 + if expHealthiness3 != actHealthiness3 { + 
t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } - // readiness check for manager2 - readiness2, err := client2.ReadinessProbe() + // liveness + liveness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := management.ReadinessProbeResponse_READY.String() - actReadiness2 := readiness2 - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + expLiveness3 := management.NodeHealthCheckResponse_ALIVE.String() + actLiveness3 := liveness3 + if expLiveness3 != actLiveness3 { + t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } - // readiness check for manager3 - readiness3, err := client3.ReadinessProbe() + // readiness + readiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := management.ReadinessProbeResponse_READY.String() + expReadiness3 := management.NodeHealthCheckResponse_READY.String() actReadiness3 := readiness3 if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } + } func TestCluster_GetNode(t *testing.T) { @@ -1304,7 +1186,7 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.GetNode(nodeConfig1.NodeId) + node11, err := client1.NodeInfo(nodeConfig1.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1317,7 +1199,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node12, err := client1.GetNode(nodeConfig2.NodeId) + node12, err := client1.NodeInfo(nodeConfig2.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1330,7 +1212,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) } - node13, err := 
client1.GetNode(nodeConfig3.NodeId) + node13, err := client1.NodeInfo(nodeConfig3.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1343,7 +1225,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) } - node21, err := client2.GetNode(nodeConfig1.NodeId) + node21, err := client2.NodeInfo(nodeConfig1.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1356,7 +1238,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node22, err := client2.GetNode(nodeConfig2.NodeId) + node22, err := client2.NodeInfo(nodeConfig2.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1369,7 +1251,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) } - node23, err := client2.GetNode(nodeConfig3.NodeId) + node23, err := client2.NodeInfo(nodeConfig3.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1382,7 +1264,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) } - node31, err := client3.GetNode(nodeConfig1.NodeId) + node31, err := client3.NodeInfo(nodeConfig1.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1395,7 +1277,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } - node32, err := client3.GetNode(nodeConfig2.NodeId) + node32, err := client3.NodeInfo(nodeConfig2.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1408,7 +1290,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) } - node33, err := client3.GetNode(nodeConfig3.NodeId) + node33, err := client3.NodeInfo(nodeConfig3.NodeId) if err != nil { t.Fatalf("%v", err) } @@ -1526,7 +1408,7 @@ func TestCluster_GetCluster(t *testing.T) { } // get cluster info from manager1 - cluster1, err := client1.GetCluster() + cluster1, err := client1.ClusterInfo() if err != nil 
{ t.Fatalf("%v", err) } @@ -1549,7 +1431,7 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.GetCluster() + cluster2, err := client2.ClusterInfo() if err != nil { t.Fatalf("%v", err) } @@ -1572,7 +1454,7 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.GetCluster() + cluster3, err := client3.ClusterInfo() if err != nil { t.Fatalf("%v", err) } @@ -1700,7 +1582,7 @@ func TestCluster_GetState(t *testing.T) { } // get index mapping from all nodes - indexConfig1, err := client1.GetValue("index_config") + indexConfig1, err := client1.Get("index_config") if err != nil { t.Fatalf("%v", err) } @@ -1710,7 +1592,7 @@ func TestCluster_GetState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expIndexConfig1, actIndexConfig1) } - indexConfig2, err := client2.GetValue("index_config") + indexConfig2, err := client2.Get("index_config") if err != nil { t.Fatalf("%v", err) } @@ -1720,7 +1602,7 @@ func TestCluster_GetState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expIndexConfig2, actIndexConfig2) } - indexConfig3, err := client3.GetValue("index_config") + indexConfig3, err := client3.Get("index_config") if err != nil { t.Fatalf("%v", err) } @@ -1834,14 +1716,14 @@ func TestCluster_SetState(t *testing.T) { t.Fatalf("%v", err) } - err = client1.SetValue("test/key1", "val1") + err = client1.Set("test/key1", "val1") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.GetValue("test/key1") + val11, err := client1.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -1850,7 +1732,7 @@ func TestCluster_SetState(t *testing.T) { if expVal11 != actVal11 { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := 
client2.GetValue("test/key1") + val21, err := client2.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -1859,7 +1741,7 @@ func TestCluster_SetState(t *testing.T) { if expVal21 != actVal21 { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.GetValue("test/key1") + val31, err := client3.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -1869,14 +1751,14 @@ func TestCluster_SetState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.SetValue("test/key2", "val2") + err = client2.Set("test/key2", "val2") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.GetValue("test/key2") + val12, err := client1.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -1885,7 +1767,7 @@ func TestCluster_SetState(t *testing.T) { if expVal12 != actVal12 { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.GetValue("test/key2") + val22, err := client2.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -1894,7 +1776,7 @@ func TestCluster_SetState(t *testing.T) { if expVal22 != actVal22 { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.GetValue("test/key2") + val32, err := client3.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -1904,14 +1786,14 @@ func TestCluster_SetState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.SetValue("test/key3", "val3") + err = client3.Set("test/key3", "val3") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.GetValue("test/key3") + val13, err := client1.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -1920,7 +1802,7 @@ func TestCluster_SetState(t *testing.T) { if expVal13 != actVal13 { 
t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.GetValue("test/key3") + val23, err := client2.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -1929,7 +1811,7 @@ func TestCluster_SetState(t *testing.T) { if expVal23 != actVal23 { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.GetValue("test/key3") + val33, err := client3.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -2044,14 +1926,14 @@ func TestCluster_DeleteState(t *testing.T) { } // set test data before delete - err = client1.SetValue("test/key1", "val1") + err = client1.Set("test/key1", "val1") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.GetValue("test/key1") + val11, err := client1.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -2060,7 +1942,7 @@ func TestCluster_DeleteState(t *testing.T) { if expVal11 != actVal11 { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.GetValue("test/key1") + val21, err := client2.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -2069,7 +1951,7 @@ func TestCluster_DeleteState(t *testing.T) { if expVal21 != actVal21 { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.GetValue("test/key1") + val31, err := client3.Get("test/key1") if err != nil { t.Fatalf("%v", err) } @@ -2079,14 +1961,14 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.SetValue("test/key2", "val2") + err = client2.Set("test/key2", "val2") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.GetValue("test/key2") + val12, err := client1.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -2095,7 +1977,7 @@ func 
TestCluster_DeleteState(t *testing.T) { if expVal12 != actVal12 { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.GetValue("test/key2") + val22, err := client2.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -2104,7 +1986,7 @@ func TestCluster_DeleteState(t *testing.T) { if expVal22 != actVal22 { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.GetValue("test/key2") + val32, err := client3.Get("test/key2") if err != nil { t.Fatalf("%v", err) } @@ -2114,14 +1996,14 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.SetValue("test/key3", "val3") + err = client3.Set("test/key3", "val3") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.GetValue("test/key3") + val13, err := client1.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -2130,7 +2012,7 @@ func TestCluster_DeleteState(t *testing.T) { if expVal13 != actVal13 { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.GetValue("test/key3") + val23, err := client2.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -2139,7 +2021,7 @@ func TestCluster_DeleteState(t *testing.T) { if expVal23 != actVal23 { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.GetValue("test/key3") + val33, err := client3.Get("test/key3") if err != nil { t.Fatalf("%v", err) } @@ -2150,28 +2032,28 @@ func TestCluster_DeleteState(t *testing.T) { } // delete - err = client1.DeleteValue("test/key1") + err = client1.Delete("test/key1") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err = client1.GetValue("test/key1") + val11, err = client1.Get("test/key1") if err != blasterrors.ErrNotFound { 
t.Fatalf("%v", err) } if val11 != nil { t.Fatalf("%v", err) } - val21, err = client2.GetValue("test/key1") + val21, err = client2.Get("test/key1") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } if val21 != nil { t.Fatalf("%v", err) } - val31, err = client3.GetValue("test/key1") + val31, err = client3.Get("test/key1") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } @@ -2179,28 +2061,28 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("%v", err) } - err = client2.DeleteValue("test/key2") + err = client2.Delete("test/key2") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err = client1.GetValue("test/key2") + val12, err = client1.Get("test/key2") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } if val12 != nil { t.Fatalf("%v", err) } - val22, err = client2.GetValue("test/key2") + val22, err = client2.Get("test/key2") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } if val22 != nil { t.Fatalf("%v", err) } - val32, err = client3.GetValue("test/key2") + val32, err = client3.Get("test/key2") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } @@ -2208,28 +2090,28 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("%v", err) } - err = client3.DeleteValue("test/key3") + err = client3.Delete("test/key3") if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err = client1.GetValue("test/key3") + val13, err = client1.Get("test/key3") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } if val13 != nil { t.Fatalf("%v", err) } - val23, err = client2.GetValue("test/key3") + val23, err = client2.Get("test/key3") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } if val23 != nil { t.Fatalf("%v", err) } - val33, err = client3.GetValue("test/key3") + val33, err = client3.Get("test/key3") if err != blasterrors.ErrNotFound { t.Fatalf("%v", err) } @@ 
-2238,19 +2120,19 @@ func TestCluster_DeleteState(t *testing.T) { } // delete non-existing data from manager1 - err = client1.DeleteValue("test/non-existing") + err = client1.Delete("test/non-existing") if err == nil { t.Fatalf("%v", err) } // delete non-existing data from manager2 - err = client2.DeleteValue("test/non-existing") + err = client2.Delete("test/non-existing") if err == nil { t.Fatalf("%v", err) } // delete non-existing data from manager3 - err = client3.DeleteValue("test/non-existing") + err = client3.Delete("test/non-existing") if err == nil { t.Fatalf("%v", err) } diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 34000af..5c7a100 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -24,212 +24,218 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type LivenessProbeResponse_State int32 +type NodeHealthCheckRequest_Probe int32 const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 ) -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", +var NodeHealthCheckRequest_Probe_name = map[int32]string{ + 0: "HEALTHINESS", + 1: "LIVENESS", + 2: "READINESS", } -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, +var NodeHealthCheckRequest_Probe_value = map[string]int32{ + "HEALTHINESS": 0, + "LIVENESS": 1, + "READINESS": 2, } -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +func (x 
NodeHealthCheckRequest_Probe) String() string { + return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) } -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{0, 0} } -type ReadinessProbeResponse_State int32 +type NodeHealthCheckResponse_State int32 const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 ) -var ReadinessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", +var NodeHealthCheckResponse_State_name = map[int32]string{ + 0: "HEALTHY", + 1: "UNHEALTHY", + 2: "ALIVE", + 3: "DEAD", + 4: "READY", + 5: "NOT_READY", } -var ReadinessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, +var NodeHealthCheckResponse_State_value = map[string]int32{ + "HEALTHY": 0, + "UNHEALTHY": 1, + "ALIVE": 2, + "DEAD": 3, + "READY": 4, + "NOT_READY": 5, } -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckResponse_State) String() string { + return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) } -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{1, 0} } -type 
WatchStoreResponse_Command int32 +type WatchResponse_Command int32 const ( - WatchStoreResponse_UNKNOWN WatchStoreResponse_Command = 0 - WatchStoreResponse_SET WatchStoreResponse_Command = 1 - WatchStoreResponse_DELETE WatchStoreResponse_Command = 2 + WatchResponse_UNKNOWN WatchResponse_Command = 0 + WatchResponse_SET WatchResponse_Command = 1 + WatchResponse_DELETE WatchResponse_Command = 2 ) -var WatchStoreResponse_Command_name = map[int32]string{ +var WatchResponse_Command_name = map[int32]string{ 0: "UNKNOWN", 1: "SET", 2: "DELETE", } -var WatchStoreResponse_Command_value = map[string]int32{ +var WatchResponse_Command_value = map[string]int32{ "UNKNOWN": 0, "SET": 1, "DELETE": 2, } -func (x WatchStoreResponse_Command) String() string { - return proto.EnumName(WatchStoreResponse_Command_name, int32(x)) +func (x WatchResponse_Command) String() string { + return proto.EnumName(WatchResponse_Command_name, int32(x)) } -func (WatchStoreResponse_Command) EnumDescriptor() ([]byte, []int) { +func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{12, 0} } -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckRequest struct { + Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=management.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() 
([]byte, []int) { +func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } +func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckRequest) ProtoMessage() {} +func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{0} } -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) } -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) } -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) } -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) +func (m *NodeHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckRequest.Size(m) } -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) } -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { +func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { - return m.State + return m.Probe } - return 
LivenessProbeResponse_UNKNOWN + return NodeHealthCheckRequest_HEALTHINESS } -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.ReadinessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckResponse struct { + State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.NodeHealthCheckResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } +func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckResponse) ProtoMessage() {} +func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{1} } -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) } -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) } -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) } -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) +func (m *NodeHealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckResponse.Size(m) } -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) } -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { +func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return ReadinessProbeResponse_UNKNOWN + return NodeHealthCheckResponse_HEALTHY } -// use for raft cluster status -type GetNodeRequest struct { +// use for raft +type NodeInfoRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } -func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*GetNodeRequest) ProtoMessage() {} -func (*GetNodeRequest) Descriptor() ([]byte, []int) { +func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} } +func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) } +func (*NodeInfoRequest) ProtoMessage() {} +func (*NodeInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{2} } -func (m *GetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeRequest.Unmarshal(m, b) +func (m *NodeInfoRequest) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeInfoRequest.Unmarshal(m, b) } -func (m *GetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeRequest.Marshal(b, m, deterministic) +func (m *NodeInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeInfoRequest.Marshal(b, m, deterministic) } -func (m *GetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeRequest.Merge(m, src) +func (m *NodeInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeInfoRequest.Merge(m, src) } -func (m *GetNodeRequest) XXX_Size() int { - return xxx_messageInfo_GetNodeRequest.Size(m) +func (m *NodeInfoRequest) XXX_Size() int { + return xxx_messageInfo_NodeInfoRequest.Size(m) } -func (m *GetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeRequest.DiscardUnknown(m) +func (m *NodeInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_NodeInfoRequest proto.InternalMessageInfo -func (m *GetNodeRequest) GetId() string { +func (m *NodeInfoRequest) GetId() string { if m != nil { return m.Id } return "" } -// use for raft cluster status -type GetNodeResponse struct { +type NodeInfoResponse struct { NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -237,47 +243,46 @@ type GetNodeResponse struct { XXX_sizecache int32 `json:"-"` } -func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } -func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } -func (*GetNodeResponse) ProtoMessage() {} -func (*GetNodeResponse) Descriptor() ([]byte, []int) { +func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } +func (m *NodeInfoResponse) String() 
string { return proto.CompactTextString(m) } +func (*NodeInfoResponse) ProtoMessage() {} +func (*NodeInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{3} } -func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeResponse.Unmarshal(m, b) +func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) } -func (m *GetNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeResponse.Marshal(b, m, deterministic) +func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) } -func (m *GetNodeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeResponse.Merge(m, src) +func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeInfoResponse.Merge(m, src) } -func (m *GetNodeResponse) XXX_Size() int { - return xxx_messageInfo_GetNodeResponse.Size(m) +func (m *NodeInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeInfoResponse.Size(m) } -func (m *GetNodeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeResponse.DiscardUnknown(m) +func (m *NodeInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo -func (m *GetNodeResponse) GetNodeConfig() *any.Any { +func (m *NodeInfoResponse) GetNodeConfig() *any.Any { if m != nil { return m.NodeConfig } return nil } -func (m *GetNodeResponse) GetState() string { +func (m *NodeInfoResponse) GetState() string { if m != nil { return m.State } return "" } -// use for raft cluster status -type SetNodeRequest struct { +type ClusterJoinRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` NodeConfig *any.Any 
`protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -285,204 +290,202 @@ type SetNodeRequest struct { XXX_sizecache int32 `json:"-"` } -func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } -func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*SetNodeRequest) ProtoMessage() {} -func (*SetNodeRequest) Descriptor() ([]byte, []int) { +func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } +func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterJoinRequest) ProtoMessage() {} +func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{4} } -func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNodeRequest.Unmarshal(m, b) +func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) } -func (m *SetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNodeRequest.Marshal(b, m, deterministic) +func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) } -func (m *SetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNodeRequest.Merge(m, src) +func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterJoinRequest.Merge(m, src) } -func (m *SetNodeRequest) XXX_Size() int { - return xxx_messageInfo_SetNodeRequest.Size(m) +func (m *ClusterJoinRequest) XXX_Size() int { + return xxx_messageInfo_ClusterJoinRequest.Size(m) } -func (m *SetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNodeRequest.DiscardUnknown(m) +func (m *ClusterJoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) } -var xxx_messageInfo_SetNodeRequest proto.InternalMessageInfo +var 
xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo -func (m *SetNodeRequest) GetId() string { +func (m *ClusterJoinRequest) GetId() string { if m != nil { return m.Id } return "" } -func (m *SetNodeRequest) GetNodeConfig() *any.Any { +func (m *ClusterJoinRequest) GetNodeConfig() *any.Any { if m != nil { return m.NodeConfig } return nil } -// use for raft cluster status -type DeleteNodeRequest struct { +type ClusterLeaveRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } -func (m *DeleteNodeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteNodeRequest) ProtoMessage() {} -func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { +func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } +func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterLeaveRequest) ProtoMessage() {} +func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{5} } -func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteNodeRequest.Unmarshal(m, b) +func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) } -func (m *DeleteNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteNodeRequest.Marshal(b, m, deterministic) +func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) } -func (m *DeleteNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNodeRequest.Merge(m, src) +func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) } -func 
(m *DeleteNodeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteNodeRequest.Size(m) +func (m *ClusterLeaveRequest) XXX_Size() int { + return xxx_messageInfo_ClusterLeaveRequest.Size(m) } -func (m *DeleteNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNodeRequest.DiscardUnknown(m) +func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo -func (m *DeleteNodeRequest) GetId() string { +func (m *ClusterLeaveRequest) GetId() string { if m != nil { return m.Id } return "" } -// use for raft cluster status -type GetClusterResponse struct { +type ClusterInfoResponse struct { Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } -func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } -func (*GetClusterResponse) ProtoMessage() {} -func (*GetClusterResponse) Descriptor() ([]byte, []int) { +func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } +func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterInfoResponse) ProtoMessage() {} +func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{6} } -func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterResponse.Unmarshal(m, b) +func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) } -func (m *GetClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterResponse.Marshal(b, m, deterministic) +func (m *ClusterInfoResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) } -func (m *GetClusterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetClusterResponse.Merge(m, src) +func (m *ClusterInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterInfoResponse.Merge(m, src) } -func (m *GetClusterResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterResponse.Size(m) +func (m *ClusterInfoResponse) XXX_Size() int { + return xxx_messageInfo_ClusterInfoResponse.Size(m) } -func (m *GetClusterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetClusterResponse.DiscardUnknown(m) +func (m *ClusterInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetClusterResponse proto.InternalMessageInfo +var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo -func (m *GetClusterResponse) GetCluster() *any.Any { +func (m *ClusterInfoResponse) GetCluster() *any.Any { if m != nil { return m.Cluster } return nil } -type GetValueRequest struct { +type GetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetValueRequest) Reset() { *m = GetValueRequest{} } -func (m *GetValueRequest) String() string { return proto.CompactTextString(m) } -func (*GetValueRequest) ProtoMessage() {} -func (*GetValueRequest) Descriptor() ([]byte, []int) { +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{7} } -func (m *GetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetValueRequest.Unmarshal(m, b) +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_GetRequest.Unmarshal(m, b) } -func (m *GetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueRequest.Marshal(b, m, deterministic) +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } -func (m *GetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueRequest.Merge(m, src) +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) } -func (m *GetValueRequest) XXX_Size() int { - return xxx_messageInfo_GetValueRequest.Size(m) +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) } -func (m *GetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueRequest.DiscardUnknown(m) +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetValueRequest proto.InternalMessageInfo +var xxx_messageInfo_GetRequest proto.InternalMessageInfo -func (m *GetValueRequest) GetKey() string { +func (m *GetRequest) GetKey() string { if m != nil { return m.Key } return "" } -type GetValueResponse struct { +type GetResponse struct { Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetValueResponse) Reset() { *m = GetValueResponse{} } -func (m *GetValueResponse) String() string { return proto.CompactTextString(m) } -func (*GetValueResponse) ProtoMessage() {} -func (*GetValueResponse) Descriptor() ([]byte, []int) { +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{8} } -func (m *GetValueResponse) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_GetValueResponse.Unmarshal(m, b) +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) } -func (m *GetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetValueResponse.Marshal(b, m, deterministic) +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) } -func (m *GetValueResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetValueResponse.Merge(m, src) +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) } -func (m *GetValueResponse) XXX_Size() int { - return xxx_messageInfo_GetValueResponse.Size(m) +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) } -func (m *GetValueResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetValueResponse.DiscardUnknown(m) +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetValueResponse proto.InternalMessageInfo +var xxx_messageInfo_GetResponse proto.InternalMessageInfo -func (m *GetValueResponse) GetValue() *any.Any { +func (m *GetResponse) GetValue() *any.Any { if m != nil { return m.Value } return nil } -type SetValueRequest struct { +type SetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -490,172 +493,172 @@ type SetValueRequest struct { XXX_sizecache int32 `json:"-"` } -func (m *SetValueRequest) Reset() { *m = SetValueRequest{} } -func (m *SetValueRequest) String() string { return proto.CompactTextString(m) } -func (*SetValueRequest) ProtoMessage() {} -func (*SetValueRequest) Descriptor() ([]byte, []int) { +func (m *SetRequest) Reset() { *m = SetRequest{} } +func (m 
*SetRequest) String() string { return proto.CompactTextString(m) } +func (*SetRequest) ProtoMessage() {} +func (*SetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{9} } -func (m *SetValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetValueRequest.Unmarshal(m, b) +func (m *SetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRequest.Unmarshal(m, b) } -func (m *SetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetValueRequest.Marshal(b, m, deterministic) +func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) } -func (m *SetValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetValueRequest.Merge(m, src) +func (m *SetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRequest.Merge(m, src) } -func (m *SetValueRequest) XXX_Size() int { - return xxx_messageInfo_SetValueRequest.Size(m) +func (m *SetRequest) XXX_Size() int { + return xxx_messageInfo_SetRequest.Size(m) } -func (m *SetValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetValueRequest.DiscardUnknown(m) +func (m *SetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetRequest.DiscardUnknown(m) } -var xxx_messageInfo_SetValueRequest proto.InternalMessageInfo +var xxx_messageInfo_SetRequest proto.InternalMessageInfo -func (m *SetValueRequest) GetKey() string { +func (m *SetRequest) GetKey() string { if m != nil { return m.Key } return "" } -func (m *SetValueRequest) GetValue() *any.Any { +func (m *SetRequest) GetValue() *any.Any { if m != nil { return m.Value } return nil } -type DeleteValueRequest struct { +type DeleteRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteValueRequest) Reset() { *m = 
DeleteValueRequest{} } -func (m *DeleteValueRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteValueRequest) ProtoMessage() {} -func (*DeleteValueRequest) Descriptor() ([]byte, []int) { +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{10} } -func (m *DeleteValueRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteValueRequest.Unmarshal(m, b) +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) } -func (m *DeleteValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteValueRequest.Marshal(b, m, deterministic) +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) } -func (m *DeleteValueRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteValueRequest.Merge(m, src) +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) } -func (m *DeleteValueRequest) XXX_Size() int { - return xxx_messageInfo_DeleteValueRequest.Size(m) +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) } -func (m *DeleteValueRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteValueRequest.DiscardUnknown(m) +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteValueRequest proto.InternalMessageInfo +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo -func (m *DeleteValueRequest) GetKey() string { +func (m *DeleteRequest) GetKey() string { if m != nil { return m.Key } return "" } -type WatchStoreRequest struct { +type WatchRequest struct { Key string 
`protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *WatchStoreRequest) Reset() { *m = WatchStoreRequest{} } -func (m *WatchStoreRequest) String() string { return proto.CompactTextString(m) } -func (*WatchStoreRequest) ProtoMessage() {} -func (*WatchStoreRequest) Descriptor() ([]byte, []int) { +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{11} } -func (m *WatchStoreRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreRequest.Unmarshal(m, b) +func (m *WatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchRequest.Unmarshal(m, b) } -func (m *WatchStoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreRequest.Marshal(b, m, deterministic) +func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) } -func (m *WatchStoreRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreRequest.Merge(m, src) +func (m *WatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchRequest.Merge(m, src) } -func (m *WatchStoreRequest) XXX_Size() int { - return xxx_messageInfo_WatchStoreRequest.Size(m) +func (m *WatchRequest) XXX_Size() int { + return xxx_messageInfo_WatchRequest.Size(m) } -func (m *WatchStoreRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreRequest.DiscardUnknown(m) +func (m *WatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WatchRequest.DiscardUnknown(m) } -var xxx_messageInfo_WatchStoreRequest proto.InternalMessageInfo +var xxx_messageInfo_WatchRequest proto.InternalMessageInfo -func (m 
*WatchStoreRequest) GetKey() string { +func (m *WatchRequest) GetKey() string { if m != nil { return m.Key } return "" } -type WatchStoreResponse struct { - Command WatchStoreResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=management.WatchStoreResponse_Command" json:"command,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type WatchResponse struct { + Command WatchResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=management.WatchResponse_Command" json:"command,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *WatchStoreResponse) Reset() { *m = WatchStoreResponse{} } -func (m *WatchStoreResponse) String() string { return proto.CompactTextString(m) } -func (*WatchStoreResponse) ProtoMessage() {} -func (*WatchStoreResponse) Descriptor() ([]byte, []int) { +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{12} } -func (m *WatchStoreResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchStoreResponse.Unmarshal(m, b) +func (m *WatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchResponse.Unmarshal(m, b) } -func (m *WatchStoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchStoreResponse.Marshal(b, m, deterministic) +func (m *WatchResponse) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) } -func (m *WatchStoreResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchStoreResponse.Merge(m, src) +func (m *WatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchResponse.Merge(m, src) } -func (m *WatchStoreResponse) XXX_Size() int { - return xxx_messageInfo_WatchStoreResponse.Size(m) +func (m *WatchResponse) XXX_Size() int { + return xxx_messageInfo_WatchResponse.Size(m) } -func (m *WatchStoreResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchStoreResponse.DiscardUnknown(m) +func (m *WatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WatchResponse.DiscardUnknown(m) } -var xxx_messageInfo_WatchStoreResponse proto.InternalMessageInfo +var xxx_messageInfo_WatchResponse proto.InternalMessageInfo -func (m *WatchStoreResponse) GetCommand() WatchStoreResponse_Command { +func (m *WatchResponse) GetCommand() WatchResponse_Command { if m != nil { return m.Command } - return WatchStoreResponse_UNKNOWN + return WatchResponse_UNKNOWN } -func (m *WatchStoreResponse) GetKey() string { +func (m *WatchResponse) GetKey() string { if m != nil { return m.Key } return "" } -func (m *WatchStoreResponse) GetValue() *any.Any { +func (m *WatchResponse) GetValue() *any.Any { if m != nil { return m.Value } @@ -663,22 +666,22 @@ func (m *WatchStoreResponse) GetValue() *any.Any { } func init() { - proto.RegisterEnum("management.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("management.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterEnum("management.WatchStoreResponse_Command", WatchStoreResponse_Command_name, WatchStoreResponse_Command_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "management.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), 
"management.ReadinessProbeResponse") - proto.RegisterType((*GetNodeRequest)(nil), "management.GetNodeRequest") - proto.RegisterType((*GetNodeResponse)(nil), "management.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "management.SetNodeRequest") - proto.RegisterType((*DeleteNodeRequest)(nil), "management.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "management.GetClusterResponse") - proto.RegisterType((*GetValueRequest)(nil), "management.GetValueRequest") - proto.RegisterType((*GetValueResponse)(nil), "management.GetValueResponse") - proto.RegisterType((*SetValueRequest)(nil), "management.SetValueRequest") - proto.RegisterType((*DeleteValueRequest)(nil), "management.DeleteValueRequest") - proto.RegisterType((*WatchStoreRequest)(nil), "management.WatchStoreRequest") - proto.RegisterType((*WatchStoreResponse)(nil), "management.WatchStoreResponse") + proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) + proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) + proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") + proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") + proto.RegisterType((*NodeInfoRequest)(nil), "management.NodeInfoRequest") + proto.RegisterType((*NodeInfoResponse)(nil), "management.NodeInfoResponse") + proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") + proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") + proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") + proto.RegisterType((*GetRequest)(nil), "management.GetRequest") + proto.RegisterType((*GetResponse)(nil), 
"management.GetResponse") + proto.RegisterType((*SetRequest)(nil), "management.SetRequest") + proto.RegisterType((*DeleteRequest)(nil), "management.DeleteRequest") + proto.RegisterType((*WatchRequest)(nil), "management.WatchRequest") + proto.RegisterType((*WatchResponse)(nil), "management.WatchResponse") } func init() { @@ -686,50 +689,52 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 674 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x4f, 0xd3, 0x50, - 0x14, 0x5d, 0x8b, 0xa3, 0xe3, 0x22, 0xa3, 0xbc, 0x20, 0xd1, 0xa2, 0x04, 0x1f, 0x8a, 0x28, 0xb1, - 0x33, 0xe8, 0x47, 0x45, 0x27, 0xad, 0x8b, 0x3a, 0x37, 0xd3, 0x22, 0x44, 0x13, 0x63, 0xba, 0xed, - 0x31, 0x16, 0xd6, 0xbe, 0xb9, 0xbe, 0x91, 0xf0, 0xd1, 0x6f, 0xfe, 0x5b, 0xfe, 0x47, 0xfe, 0x09, - 0x66, 0xfd, 0xfd, 0xbb, 0x24, 0x7e, 0x6b, 0xdf, 0xbd, 0xf7, 0x9c, 0x73, 0xcf, 0xde, 0x69, 0x06, - 0x0f, 0x26, 0x53, 0xca, 0x68, 0x6f, 0x76, 0xd6, 0x30, 0x0d, 0xcb, 0x18, 0x12, 0x93, 0x58, 0x2c, - 0xf2, 0x28, 0x3b, 0x65, 0x04, 0xe1, 0x89, 0x74, 0x67, 0x48, 0xe9, 0x70, 0x4c, 0x1a, 0xc1, 0xa0, - 0x61, 0x5d, 0xb9, 0x6d, 0xd2, 0x66, 0xb2, 0x44, 0xcc, 0x09, 0xf3, 0x8a, 0xf8, 0x17, 0x07, 0xb7, - 0xda, 0xa3, 0x4b, 0x62, 0x11, 0xdb, 0xfe, 0x3c, 0xa5, 0x3d, 0xa2, 0x11, 0x7b, 0x42, 0x2d, 0x9b, - 0xa0, 0x57, 0x50, 0xb5, 0x99, 0xc1, 0xc8, 0x6d, 0x6e, 0x9b, 0xdb, 0xab, 0x1f, 0x3c, 0x92, 0x23, - 0xfc, 0x99, 0x13, 0xb2, 0x3e, 0x6f, 0xd7, 0xdc, 0x29, 0xfc, 0x18, 0xaa, 0xce, 0x3b, 0x5a, 0x06, - 0xe1, 0x4b, 0xe7, 0x63, 0xa7, 0x7b, 0xda, 0x11, 0x2b, 0x68, 0x09, 0xaa, 0xcd, 0xf6, 0xfb, 0x13, - 0x55, 0xe4, 0x50, 0x0d, 0x6e, 0x28, 0x6a, 0x53, 0x11, 0x79, 0xfc, 0x9b, 0x83, 0x0d, 0x8d, 0x18, - 0x83, 0x51, 0x5a, 0xc4, 0x61, 0x5c, 0xc4, 0x5e, 0x54, 0x44, 0xf6, 0x48, 0x5c, 0x85, 0x9c, 0xa7, - 0x42, 0x53, 0x9b, 0xca, 0x57, 0x91, 0x43, 0x2b, 0xb0, 0xd4, 0xe9, 0x1e, 0xff, 0x70, 0x5f, 0x79, - 0xbc, 0x0d, 0xf5, 0x16, 0x61, 0x1d, 0x3a, 0x20, 0x1a, 0xf9, 0x39, 0x23, 
0x36, 0x43, 0x75, 0xe0, - 0x47, 0x03, 0x87, 0x7e, 0x49, 0xe3, 0x47, 0x03, 0xfc, 0x1d, 0x56, 0x83, 0x0e, 0x4f, 0xe4, 0x0b, - 0x00, 0x8b, 0x0e, 0xc8, 0x11, 0xb5, 0xce, 0x46, 0x43, 0xa7, 0x75, 0xf9, 0x60, 0x5d, 0x76, 0x5d, - 0x97, 0x7d, 0xd7, 0xe5, 0xa6, 0x75, 0xa5, 0x45, 0xfa, 0xd0, 0xba, 0xbf, 0x1a, 0xef, 0x60, 0x7b, - 0x82, 0x4f, 0xa0, 0xae, 0x17, 0x0a, 0x48, 0xb0, 0xf1, 0xd7, 0x63, 0xc3, 0x3b, 0xb0, 0xa6, 0x90, - 0x31, 0x61, 0xa4, 0x68, 0x37, 0x05, 0x50, 0x8b, 0xb0, 0xa3, 0xf1, 0xcc, 0x66, 0x64, 0x1a, 0xac, - 0x27, 0x83, 0xd0, 0x77, 0x8f, 0x0a, 0x77, 0xf3, 0x9b, 0xf0, 0x8e, 0xe3, 0xd0, 0x89, 0x31, 0x9e, - 0x05, 0x44, 0x22, 0x2c, 0x5c, 0x90, 0x2b, 0x8f, 0x69, 0xfe, 0x88, 0x0f, 0x41, 0x0c, 0x9b, 0x3c, - 0xa2, 0x27, 0x50, 0xbd, 0x9c, 0x1f, 0x14, 0xd2, 0xb8, 0x2d, 0xb8, 0x0b, 0xab, 0x7a, 0x19, 0x49, - 0x08, 0xc8, 0x97, 0x03, 0xee, 0x02, 0x72, 0x0d, 0x2a, 0x11, 0xfe, 0x10, 0xd6, 0x4e, 0x0d, 0xd6, - 0x3f, 0xd7, 0x19, 0x9d, 0x16, 0xb4, 0xfd, 0xe1, 0x00, 0x45, 0xfb, 0xbc, 0x15, 0xdf, 0x80, 0xd0, - 0xa7, 0xa6, 0x69, 0x58, 0x03, 0xef, 0x46, 0xef, 0x46, 0x6f, 0x74, 0x7a, 0x40, 0x3e, 0x72, 0xbb, - 0x35, 0x7f, 0xcc, 0xa7, 0xe2, 0x33, 0xb6, 0x5c, 0x28, 0xdf, 0x72, 0x1f, 0x04, 0x0f, 0x31, 0x9e, - 0x08, 0x01, 0x16, 0x74, 0xf5, 0x58, 0xe4, 0x10, 0xc0, 0xa2, 0xa2, 0xb6, 0xd5, 0x63, 0x55, 0xe4, - 0x0f, 0xfe, 0x2e, 0x02, 0x7c, 0x0a, 0xd4, 0xa1, 0x36, 0xac, 0xc4, 0x72, 0x8f, 0x36, 0x52, 0x4c, - 0xea, 0xfc, 0xcb, 0x22, 0xdd, 0x2f, 0xfd, 0x54, 0xe0, 0x0a, 0xea, 0x40, 0x3d, 0x1e, 0xe0, 0x5c, - 0x38, 0x5c, 0x1e, 0x7a, 0x5c, 0x41, 0x0a, 0x08, 0x5e, 0x2e, 0x91, 0x14, 0x1d, 0x88, 0xc7, 0x59, - 0xda, 0xcc, 0xac, 0x05, 0x28, 0xaf, 0x41, 0xd0, 0xb3, 0x50, 0xe2, 0x99, 0x94, 0x72, 0xa4, 0xe2, - 0x0a, 0x52, 0x01, 0xc2, 0x9c, 0xa1, 0x7b, 0x51, 0x8c, 0x54, 0xfe, 0x0a, 0x60, 0xde, 0x01, 0x84, - 0x49, 0xcc, 0x75, 0x66, 0x2b, 0xb1, 0x4c, 0x22, 0xb9, 0xb8, 0x82, 0x3e, 0xc0, 0x4d, 0xe7, 0x52, - 0xfd, 0x37, 0xd2, 0x33, 0x0e, 0xb5, 0xa0, 0xe6, 0x47, 0x16, 0x25, 0x6d, 0x8c, 0x86, 0x46, 0xba, - 0x9b, 0x5d, 
0x0c, 0x44, 0x35, 0xa1, 0xa6, 0x67, 0x02, 0x25, 0x12, 0x5d, 0xe0, 0x4f, 0x0b, 0x96, - 0x23, 0x69, 0x45, 0x5b, 0x69, 0x9f, 0xaf, 0x09, 0xd4, 0x05, 0x08, 0x53, 0x17, 0xff, 0xbd, 0x52, - 0x31, 0x8f, 0xbb, 0x94, 0x0e, 0xab, 0xe3, 0xd2, 0x4b, 0xa8, 0xe9, 0x96, 0x31, 0xb1, 0xcf, 0x29, - 0xcb, 0x75, 0x3b, 0x57, 0xce, 0xdb, 0xa7, 0xdf, 0xf6, 0x87, 0x23, 0x76, 0x3e, 0xeb, 0xc9, 0x7d, - 0x6a, 0x36, 0x4c, 0x6a, 0xcf, 0x2e, 0x8c, 0x46, 0x6f, 0x6c, 0xd8, 0xac, 0x91, 0xf1, 0x97, 0xa0, - 0xb7, 0xe8, 0x1c, 0x3e, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x12, 0x52, 0x02, 0x9b, 0x30, 0x08, - 0x00, 0x00, + // 719 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xff, 0x6f, 0xd2, 0x40, + 0x1c, 0xa5, 0x65, 0x05, 0xf6, 0x61, 0x5f, 0x9a, 0x73, 0xd9, 0x17, 0x66, 0xe6, 0x56, 0x35, 0x99, + 0x2e, 0x16, 0x33, 0x35, 0x6a, 0xfc, 0x8a, 0xd0, 0x0c, 0x36, 0xec, 0x4c, 0xcb, 0x5c, 0xb6, 0x18, + 0x4d, 0x81, 0x1b, 0x90, 0xd1, 0x1e, 0xae, 0xc7, 0x92, 0xfd, 0x15, 0xfa, 0x97, 0xf8, 0xab, 0xff, + 0x9e, 0x69, 0xaf, 0x85, 0x1b, 0xb4, 0xdd, 0x12, 0x7f, 0xe3, 0xee, 0xde, 0x7b, 0x9f, 0xf7, 0xae, + 0x7d, 0x05, 0x1e, 0x0c, 0x2e, 0x08, 0x25, 0xcd, 0xe1, 0x59, 0xd1, 0xb6, 0x1c, 0xab, 0x83, 0x6d, + 0xec, 0x50, 0xee, 0xa7, 0xea, 0x1f, 0x23, 0x18, 0xef, 0x14, 0xd6, 0x3a, 0x84, 0x74, 0xfa, 0xb8, + 0x38, 0x22, 0x5a, 0xce, 0x15, 0x83, 0x15, 0xd6, 0x27, 0x8f, 0xb0, 0x3d, 0xa0, 0xc1, 0xa1, 0xf2, + 0x4b, 0x80, 0x65, 0x9d, 0xb4, 0x71, 0x15, 0x5b, 0x7d, 0xda, 0x2d, 0x77, 0x71, 0xeb, 0xdc, 0xc0, + 0x3f, 0x87, 0xd8, 0xa5, 0xe8, 0x3d, 0x48, 0x83, 0x0b, 0xd2, 0xc4, 0xab, 0xc2, 0xa6, 0xb0, 0xbd, + 0xb0, 0xbb, 0xad, 0x72, 0x06, 0xa2, 0x29, 0xea, 0x17, 0x0f, 0x6f, 0x30, 0x9a, 0xf2, 0x02, 0x24, + 0x7f, 0x8d, 0x16, 0x21, 0x5f, 0xd5, 0x4a, 0xf5, 0x46, 0xb5, 0xa6, 0x6b, 0xa6, 0x29, 0xa7, 0xd0, + 0x1c, 0xe4, 0xea, 0xb5, 0xaf, 0x9a, 0xbf, 0x12, 0xd0, 0x3c, 0xcc, 0x1a, 0x5a, 0xa9, 0xc2, 0x0e, + 0x45, 0xe5, 0x8f, 0x00, 0x2b, 0x53, 0xf2, 0xee, 0x80, 0x38, 0x2e, 0x46, 0x1f, 0x40, 
0x72, 0xa9, + 0x45, 0x43, 0x4b, 0x8f, 0x12, 0x2d, 0x31, 0x8e, 0x6a, 0x7a, 0x04, 0x83, 0xf1, 0x14, 0x03, 0x24, + 0x7f, 0x8d, 0xf2, 0x90, 0x65, 0x9e, 0x4e, 0xe4, 0x94, 0xe7, 0xe0, 0x48, 0x0f, 0x97, 0x02, 0x9a, + 0x05, 0xa9, 0xe4, 0xf9, 0x93, 0x45, 0x94, 0x83, 0x99, 0x8a, 0x56, 0xaa, 0xc8, 0x69, 0x6f, 0xd3, + 0x73, 0x79, 0x22, 0xcf, 0x78, 0x70, 0xfd, 0xb0, 0xf1, 0x83, 0x2d, 0x25, 0x65, 0x0b, 0x16, 0xbd, + 0xd9, 0x35, 0xe7, 0x8c, 0x84, 0x57, 0xb7, 0x00, 0x62, 0xaf, 0xed, 0x9b, 0x9c, 0x35, 0xc4, 0x5e, + 0x5b, 0xf9, 0x0e, 0xf2, 0x18, 0x12, 0x64, 0x79, 0x0e, 0xe0, 0x90, 0x36, 0x2e, 0x13, 0xe7, 0xac, + 0xd7, 0xf1, 0xb1, 0xf9, 0xdd, 0x25, 0x95, 0x3d, 0x2b, 0x35, 0x7c, 0x56, 0x6a, 0xc9, 0xb9, 0x32, + 0x38, 0x1c, 0x5a, 0x0a, 0x6f, 0x40, 0xf4, 0xc5, 0x83, 0x58, 0xa7, 0x80, 0xca, 0xfd, 0xa1, 0x4b, + 0xf1, 0xc5, 0x3e, 0xe9, 0x39, 0x31, 0x2e, 0x26, 0x26, 0x8a, 0xb7, 0x9b, 0xa8, 0x3c, 0x84, 0x3b, + 0x81, 0x76, 0x1d, 0x5b, 0x97, 0x38, 0x2e, 0xa2, 0x36, 0x82, 0x5d, 0x4b, 0xa9, 0x42, 0xb6, 0xc5, + 0xb6, 0x13, 0x23, 0x86, 0x20, 0x65, 0x03, 0x60, 0x0f, 0xd3, 0x70, 0x88, 0x0c, 0xe9, 0x73, 0x7c, + 0x15, 0x4c, 0xf1, 0x7e, 0x2a, 0xaf, 0x21, 0xef, 0x9f, 0x07, 0xf2, 0x8f, 0x41, 0xba, 0xb4, 0xfa, + 0x43, 0x9c, 0x28, 0xce, 0x20, 0xca, 0x3e, 0x80, 0x99, 0x20, 0x3d, 0xd6, 0x12, 0x6f, 0xd6, 0xda, + 0x82, 0xf9, 0x0a, 0xee, 0x63, 0x8a, 0xe3, 0x9d, 0x6e, 0xc2, 0xdc, 0xb1, 0x45, 0x5b, 0xdd, 0x78, + 0xc4, 0x5f, 0x01, 0xe6, 0x03, 0x48, 0x10, 0xe7, 0x0d, 0x64, 0x5b, 0xc4, 0xb6, 0x2d, 0xa7, 0x1d, + 0xbc, 0xe1, 0x5b, 0xfc, 0x1b, 0x7e, 0x0d, 0xab, 0x96, 0x19, 0xd0, 0x08, 0x19, 0xe1, 0x00, 0x31, + 0x22, 0x51, 0xfa, 0xe6, 0x44, 0x3b, 0x90, 0x0d, 0x14, 0xbd, 0x6e, 0x1c, 0xe9, 0x07, 0xfa, 0xe1, + 0xb1, 0x2e, 0xa7, 0x50, 0x16, 0xd2, 0xa6, 0xd6, 0x90, 0x05, 0x04, 0x90, 0xa9, 0x68, 0x75, 0xad, + 0xa1, 0xc9, 0xe2, 0xee, 0xef, 0x0c, 0xc0, 0xe7, 0x91, 0x31, 0xf4, 0x8d, 0x35, 0x80, 0x6b, 0x1f, + 0x52, 0x6e, 0xfe, 0x5a, 0x14, 0xee, 0xdf, 0xa2, 0xbe, 0x4a, 0x0a, 0xed, 0x41, 0x2e, 0x2c, 0x0f, + 0x5a, 0x9f, 0xa4, 0x70, 
0xad, 0x2b, 0xdc, 0x8d, 0x3e, 0xe4, 0x84, 0xf2, 0x5c, 0x4b, 0xd0, 0x06, + 0x0f, 0x9f, 0xae, 0x4f, 0x61, 0x79, 0xea, 0xba, 0x34, 0xef, 0xc3, 0xa9, 0xa4, 0x50, 0x0d, 0xe6, + 0xf8, 0x4a, 0xa0, 0x7b, 0x11, 0x4a, 0x7c, 0x59, 0x12, 0xa4, 0xaa, 0x23, 0x4f, 0x7e, 0xbe, 0x18, + 0x60, 0x21, 0x6a, 0xc2, 0x44, 0xba, 0x83, 0x91, 0x29, 0xff, 0x3d, 0xf9, 0x0f, 0xa9, 0xa7, 0x02, + 0x7a, 0x05, 0xe9, 0x3d, 0x4c, 0xd1, 0x32, 0x8f, 0x1d, 0xf7, 0xb2, 0xb0, 0x32, 0xb5, 0x3f, 0xb2, + 0xf1, 0x12, 0xd2, 0xe6, 0x24, 0x73, 0x5c, 0xbb, 0x84, 0x9b, 0x78, 0x07, 0x19, 0x56, 0x29, 0xb4, + 0xc6, 0x73, 0xaf, 0xd5, 0x2c, 0x81, 0xfe, 0x11, 0x24, 0x96, 0x7b, 0x35, 0xa2, 0x32, 0x8c, 0xbc, + 0x16, 0x5b, 0x26, 0x3f, 0xf3, 0x5b, 0xc8, 0x99, 0x8e, 0x35, 0x70, 0xbb, 0x84, 0xc6, 0x5e, 0x5e, + 0xec, 0xfc, 0x4f, 0x4f, 0x4e, 0x77, 0x3a, 0x3d, 0xda, 0x1d, 0x36, 0xd5, 0x16, 0xb1, 0x8b, 0x36, + 0x71, 0x87, 0xe7, 0x56, 0xb1, 0xd9, 0xb7, 0x5c, 0x5a, 0x8c, 0xf8, 0x33, 0x6f, 0x66, 0xfc, 0xcd, + 0x67, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x53, 0x09, 0x0b, 0x91, 0xea, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -744,17 +749,16 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ManagementClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) - GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) - SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) - WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_WatchClusterClient, error) - GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) - SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) - WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Management_WatchStoreClient, error) + NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) + NodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfoResponse, error) + ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) + ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Set(ctx context.Context, 
in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) } @@ -766,66 +770,57 @@ func NewManagementClient(cc *grpc.ClientConn) ManagementClient { return &managementClient{cc} } -func (c *managementClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { - out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/management.Management/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/management.Management/ReadinessProbe", in, out, opts...) +func (c *managementClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { + out := new(NodeHealthCheckResponse) + err := c.cc.Invoke(ctx, "/management.Management/NodeHealthCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { - out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/management.Management/GetNode", in, out, opts...) +func (c *managementClient) NodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfoResponse, error) { + out := new(NodeInfoResponse) + err := c.cc.Invoke(ctx, "/management.Management/NodeInfo", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *managementClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *managementClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/SetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/management.Management/ClusterJoin", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *managementClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/DeleteNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/management.Management/ClusterLeave", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { - out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/management.Management/GetCluster", in, out, opts...) +func (c *managementClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { + out := new(ClusterInfoResponse) + err := c.cc.Invoke(ctx, "/management.Management/ClusterInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[0], "/management.Management/WatchCluster", opts...) 
+func (c *managementClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[0], "/management.Management/ClusterWatch", opts...) if err != nil { return nil, err } - x := &managementWatchClusterClient{stream} + x := &managementClusterWatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -835,56 +830,56 @@ func (c *managementClient) WatchCluster(ctx context.Context, in *empty.Empty, op return x, nil } -type Management_WatchClusterClient interface { - Recv() (*GetClusterResponse, error) +type Management_ClusterWatchClient interface { + Recv() (*ClusterInfoResponse, error) grpc.ClientStream } -type managementWatchClusterClient struct { +type managementClusterWatchClient struct { grpc.ClientStream } -func (x *managementWatchClusterClient) Recv() (*GetClusterResponse, error) { - m := new(GetClusterResponse) +func (x *managementClusterWatchClient) Recv() (*ClusterInfoResponse, error) { + m := new(ClusterInfoResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *managementClient) GetValue(ctx context.Context, in *GetValueRequest, opts ...grpc.CallOption) (*GetValueResponse, error) { - out := new(GetValueResponse) - err := c.cc.Invoke(ctx, "/management.Management/GetValue", in, out, opts...) +func (c *managementClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/management.Management/Get", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *managementClient) SetValue(ctx context.Context, in *SetValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *managementClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/SetValue", in, out, opts...) + err := c.cc.Invoke(ctx, "/management.Management/Set", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) DeleteValue(ctx context.Context, in *DeleteValueRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *managementClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/DeleteValue", in, out, opts...) + err := c.cc.Invoke(ctx, "/management.Management/Delete", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *managementClient) WatchStore(ctx context.Context, in *WatchStoreRequest, opts ...grpc.CallOption) (Management_WatchStoreClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[1], "/management.Management/WatchStore", opts...) +func (c *managementClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[1], "/management.Management/Watch", opts...) 
if err != nil { return nil, err } - x := &managementWatchStoreClient{stream} + x := &managementWatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -894,17 +889,17 @@ func (c *managementClient) WatchStore(ctx context.Context, in *WatchStoreRequest return x, nil } -type Management_WatchStoreClient interface { - Recv() (*WatchStoreResponse, error) +type Management_WatchClient interface { + Recv() (*WatchResponse, error) grpc.ClientStream } -type managementWatchStoreClient struct { +type managementWatchClient struct { grpc.ClientStream } -func (x *managementWatchStoreClient) Recv() (*WatchStoreResponse, error) { - m := new(WatchStoreResponse) +func (x *managementWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -922,17 +917,16 @@ func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts . // ManagementServer is the server API for Management service. 
type ManagementServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) - GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) - SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) - DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) - GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) - WatchCluster(*empty.Empty, Management_WatchClusterServer) error - GetValue(context.Context, *GetValueRequest) (*GetValueResponse, error) - SetValue(context.Context, *SetValueRequest) (*empty.Empty, error) - DeleteValue(context.Context, *DeleteValueRequest) (*empty.Empty, error) - WatchStore(*WatchStoreRequest, Management_WatchStoreServer) error + NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) + NodeInfo(context.Context, *NodeInfoRequest) (*NodeInfoResponse, error) + ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) + ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) + ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) + ClusterWatch(*empty.Empty, Management_ClusterWatchServer) error + Get(context.Context, *GetRequest) (*GetResponse, error) + Set(context.Context, *SetRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + Watch(*WatchRequest, Management_WatchServer) error Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } @@ -940,207 +934,189 @@ func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { s.RegisterService(&_Management_serviceDesc, srv) } -func _Management_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) +func _Management_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeHealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).LivenessProbe(ctx, in) + return srv.(ManagementServer).NodeHealthCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/LivenessProbe", + FullMethod: "/management.Management/NodeHealthCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).LivenessProbe(ctx, req.(*empty.Empty)) + return srv.(ManagementServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).ReadinessProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ReadinessProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ReadinessProbe(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeRequest) +func _Management_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).GetNode(ctx, in) + return srv.(ManagementServer).NodeInfo(ctx, in) } info := &grpc.UnaryServerInfo{ 
Server: srv, - FullMethod: "/management.Management/GetNode", + FullMethod: "/management.Management/NodeInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).GetNode(ctx, req.(*GetNodeRequest)) + return srv.(ManagementServer).NodeInfo(ctx, req.(*NodeInfoRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodeRequest) +func _Management_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterJoinRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).SetNode(ctx, in) + return srv.(ManagementServer).ClusterJoin(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/SetNode", + FullMethod: "/management.Management/ClusterJoin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).SetNode(ctx, req.(*SetNodeRequest)) + return srv.(ManagementServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNodeRequest) +func _Management_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterLeaveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).DeleteNode(ctx, in) + return srv.(ManagementServer).ClusterLeave(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - 
FullMethod: "/management.Management/DeleteNode", + FullMethod: "/management.Management/ClusterLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) + return srv.(ManagementServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Management_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).GetCluster(ctx, in) + return srv.(ManagementServer).ClusterInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/GetCluster", + FullMethod: "/management.Management/ClusterInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).GetCluster(ctx, req.(*empty.Empty)) + return srv.(ManagementServer).ClusterInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Management_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Management_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(ManagementServer).WatchCluster(m, &managementWatchClusterServer{stream}) + return srv.(ManagementServer).ClusterWatch(m, &managementClusterWatchServer{stream}) } -type Management_WatchClusterServer interface { - Send(*GetClusterResponse) error +type Management_ClusterWatchServer interface { + Send(*ClusterInfoResponse) error grpc.ServerStream } -type managementWatchClusterServer struct { +type 
managementClusterWatchServer struct { grpc.ServerStream } -func (x *managementWatchClusterServer) Send(m *GetClusterResponse) error { +func (x *managementClusterWatchServer) Send(m *ClusterInfoResponse) error { return x.ServerStream.SendMsg(m) } -func _Management_GetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValueRequest) +func _Management_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).GetValue(ctx, in) + return srv.(ManagementServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/GetValue", + FullMethod: "/management.Management/Get", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).GetValue(ctx, req.(*GetValueRequest)) + return srv.(ManagementServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_SetValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetValueRequest) +func _Management_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).SetValue(ctx, in) + return srv.(ManagementServer).Set(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/SetValue", + FullMethod: "/management.Management/Set", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ManagementServer).SetValue(ctx, req.(*SetValueRequest)) + return srv.(ManagementServer).Set(ctx, req.(*SetRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_DeleteValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteValueRequest) +func _Management_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ManagementServer).DeleteValue(ctx, in) + return srv.(ManagementServer).Delete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/management.Management/DeleteValue", + FullMethod: "/management.Management/Delete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).DeleteValue(ctx, req.(*DeleteValueRequest)) + return srv.(ManagementServer).Delete(ctx, req.(*DeleteRequest)) } return interceptor(ctx, in, info, handler) } -func _Management_WatchStore_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WatchStoreRequest) +func _Management_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(ManagementServer).WatchStore(m, &managementWatchStoreServer{stream}) + return srv.(ManagementServer).Watch(m, &managementWatchServer{stream}) } -type Management_WatchStoreServer interface { - Send(*WatchStoreResponse) error +type Management_WatchServer interface { + Send(*WatchResponse) error grpc.ServerStream } -type managementWatchStoreServer struct { +type managementWatchServer struct { grpc.ServerStream } -func (x *managementWatchStoreServer) Send(m *WatchStoreResponse) error { +func (x *managementWatchServer) Send(m *WatchResponse) error { 
return x.ServerStream.SendMsg(m) } @@ -1167,40 +1143,36 @@ var _Management_serviceDesc = grpc.ServiceDesc{ HandlerType: (*ManagementServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "LivenessProbe", - Handler: _Management_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Management_ReadinessProbe_Handler, + MethodName: "NodeHealthCheck", + Handler: _Management_NodeHealthCheck_Handler, }, { - MethodName: "GetNode", - Handler: _Management_GetNode_Handler, + MethodName: "NodeInfo", + Handler: _Management_NodeInfo_Handler, }, { - MethodName: "SetNode", - Handler: _Management_SetNode_Handler, + MethodName: "ClusterJoin", + Handler: _Management_ClusterJoin_Handler, }, { - MethodName: "DeleteNode", - Handler: _Management_DeleteNode_Handler, + MethodName: "ClusterLeave", + Handler: _Management_ClusterLeave_Handler, }, { - MethodName: "GetCluster", - Handler: _Management_GetCluster_Handler, + MethodName: "ClusterInfo", + Handler: _Management_ClusterInfo_Handler, }, { - MethodName: "GetValue", - Handler: _Management_GetValue_Handler, + MethodName: "Get", + Handler: _Management_Get_Handler, }, { - MethodName: "SetValue", - Handler: _Management_SetValue_Handler, + MethodName: "Set", + Handler: _Management_Set_Handler, }, { - MethodName: "DeleteValue", - Handler: _Management_DeleteValue_Handler, + MethodName: "Delete", + Handler: _Management_Delete_Handler, }, { MethodName: "Snapshot", @@ -1209,13 +1181,13 @@ var _Management_serviceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "WatchCluster", - Handler: _Management_WatchCluster_Handler, + StreamName: "ClusterWatch", + Handler: _Management_ClusterWatch_Handler, ServerStreams: true, }, { - StreamName: "WatchStore", - Handler: _Management_WatchStore_Handler, + StreamName: "Watch", + Handler: _Management_Watch_Handler, ServerStreams: true, }, }, diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index dfeb9d2..e3fc2cd 100644 --- 
a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -22,91 +22,87 @@ package management; option go_package = "github.com/mosuka/blast/protobuf/management"; service Management { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} - - rpc GetNode (GetNodeRequest) returns (GetNodeResponse) {} - rpc SetNode (SetNodeRequest) returns (google.protobuf.Empty) {} - rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} - rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} - rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} - - rpc GetValue (GetValueRequest) returns (GetValueResponse) {} - rpc SetValue (SetValueRequest) returns (google.protobuf.Empty) {} - rpc DeleteValue (DeleteValueRequest) returns (google.protobuf.Empty) {} - rpc WatchStore (WatchStoreRequest) returns (stream WatchStoreResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} + rpc NodeInfo (NodeInfoRequest) returns (NodeInfoResponse) {} + + rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} + rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterInfoResponse) {} + + rpc Get (GetRequest) returns (GetResponse) {} + rpc Set (SetRequest) returns (google.protobuf.Empty) {} + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) {} + rpc Watch (WatchRequest) returns (stream WatchResponse) {} rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} } -// use for health check -message LivenessProbeResponse { - enum State { - UNKNOWN = 0; - ALIVE = 1; - DEAD = 2; +message NodeHealthCheckRequest { + enum Probe { + HEALTHINESS = 0; + LIVENESS = 1; + READINESS = 2; } - State 
state = 1; + Probe probe = 1; } -// use for health check -message ReadinessProbeResponse { +message NodeHealthCheckResponse { enum State { - UNKNOWN = 0; - READY = 1; - NOT_READY = 2; + HEALTHY = 0; + UNHEALTHY = 1; + ALIVE = 2; + DEAD = 3; + READY = 4; + NOT_READY = 5; } State state = 1; } -// use for raft cluster status -message GetNodeRequest { +// use for raft +message NodeInfoRequest { string id = 1; } -// use for raft cluster status -message GetNodeResponse { +message NodeInfoResponse { google.protobuf.Any nodeConfig = 1; string state = 2; } -// use for raft cluster status -message SetNodeRequest { +message ClusterJoinRequest { string id = 1; google.protobuf.Any nodeConfig = 2; } -// use for raft cluster status -message DeleteNodeRequest { +message ClusterLeaveRequest { string id = 1; } -// use for raft cluster status -message GetClusterResponse { +message ClusterInfoResponse { google.protobuf.Any cluster = 1; } -message GetValueRequest { +message GetRequest { string key = 1; } -message GetValueResponse { +message GetResponse { google.protobuf.Any value = 1; } -message SetValueRequest { +message SetRequest { string key = 1; google.protobuf.Any value = 2; } -message DeleteValueRequest { +message DeleteRequest { string key = 1; } -message WatchStoreRequest { +message WatchRequest { string key = 1; } -message WatchStoreResponse { +message WatchResponse { enum Command { UNKNOWN = 0; SET = 1; From bde9060a098d75b0376061d597172dcdfa2b1c57 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 30 Jul 2019 16:43:12 +0900 Subject: [PATCH 08/76] Change protobuf (#87) --- cmd/blast/main.go | 4 +- cmd/blast/manager_cluster_watch.go | 14 +- cmd/blast/manager_start.go | 23 +- dispatcher/grpc_service.go | 94 +- go.mod | 4 +- go.sum | 8 +- hashutils/hashutils.go | 32 + indexer/grpc_service.go | 98 +- manager/grpc_client.go | 65 +- manager/grpc_service.go | 204 ++-- manager/raft_fsm.go | 69 +- manager/raft_fsm_test.go | 213 ++-- manager/raft_server.go | 115 +- 
manager/server.go | 47 +- manager/server_test.go | 1636 +++++++++++++++----------- protobuf/management/management.pb.go | 294 +++-- protobuf/management/management.proto | 22 +- 17 files changed, 1654 insertions(+), 1288 deletions(-) create mode 100644 hashutils/hashutils.go diff --git a/cmd/blast/main.go b/cmd/blast/main.go index 42adee0..1a04817 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -289,7 +289,7 @@ func main() { //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -321,7 +321,7 @@ func main() { //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index 8bef44b..0e74722 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -16,14 +16,12 @@ package main import ( "encoding/json" - "errors" "fmt" "io" "log" "os" "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" "github.com/urfave/cli" ) @@ -61,17 +59,7 @@ func managerClusterWatch(c *cli.Context) error { break } - cluster, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - return err - } - if cluster == nil { - return errors.New("nil") - } - - var clusterBytes []byte - clusterMap := *cluster.(*map[string]interface{}) - clusterBytes, err = json.MarshalIndent(clusterMap, "", " ") + clusterBytes, err := json.MarshalIndent(resp.Cluster, "", " ") if err != nil { return err } diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go index e53a707..af0be52 100644 --- a/cmd/blast/manager_start.go +++ b/cmd/blast/manager_start.go @@ -24,6 +24,7 @@ import ( "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" 
"github.com/urfave/cli" ) @@ -91,20 +92,12 @@ func managerStart(c *cli.Context) error { httpLogCompress, ) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if peerGrpcAddr != "" { - clusterConfig.PeerAddr = peerGrpcAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - NodeId: nodeId, - BindAddr: nodeAddr, - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - DataDir: dataDir, - RaftStorageType: raftStorageType, + node := &management.Node{ + BindAddress: nodeAddr, + Metadata: &management.Metadata{ + GrpcAddress: grpcAddr, + HttpAddress: httpAddr, + }, } var err error @@ -127,7 +120,7 @@ func managerStart(c *cli.Context) error { IndexStorageType: indexStorageType, } - svr, err := manager.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) + svr, err := manager.NewServer(peerGrpcAddr, nodeId, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) if err != nil { return err } diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index df7d1ad..39152ff 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -35,6 +35,7 @@ import ( "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/sortutils" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -45,7 +46,7 @@ type GRPCService struct { managerAddr string logger *zap.Logger - managers map[string]interface{} + managers *management.Cluster managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} @@ -61,7 +62,7 @@ func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error managerAddr: managerAddr, logger: logger, - managers: make(map[string]interface{}, 0), + managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, 
managerClients: make(map[string]*manager.GRPCClient, 0), indexers: make(map[string]interface{}, 0), @@ -92,20 +93,14 @@ func (s *GRPCService) Stop() error { func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { var client *manager.GRPCClient - for id, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("id", id)) - continue - } - - state, ok := nm["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) + for id, node := range s.managers.Nodes { + if node.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("id", id)) continue } - if state == raft.Leader.String() || state == raft.Follower.String() { + if node.Status == raft.Leader.String() || node.Status == raft.Follower.String() { + var ok bool client, ok = s.managerClients[id] if ok { return client, nil @@ -113,7 +108,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.Status)) } } @@ -123,7 +118,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { +func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { client, err := manager.NewGRPCClient(s.managerAddr) defer func() { err := client.Close() @@ -165,29 +160,21 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) // create clients for managers - for nodeId, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion 
failed", zap.String("node_id", nodeId)) + for nodeId, node := range s.managers.Nodes { + if node.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) continue } - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := manager.NewGRPCClient(grpcAddr) + s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) } if client != nil { s.managerClients[nodeId] = client @@ -223,31 +210,18 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - - // get current manager cluster - managersIntr, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if managersIntr == nil { - s.logger.Error(err.Error()) - continue - } - managers := *managersIntr.(*map[string]interface{}) + managers := resp.Cluster if !reflect.DeepEqual(s.managers, managers) { // open clients - for nodeId, metadata := range managers { - mm, ok := metadata.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", 
zap.String("node_id", nodeId)) + for nodeId, node := range managers.Nodes { + if node.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) continue } - grpcAddr, ok := mm["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } @@ -255,9 +229,9 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { if exist { s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if client.GetAddress() != node.Metadata.GrpcAddress { + s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) delete(s.managerClients, nodeId) @@ -266,24 +240,24 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := manager.NewGRPCClient(grpcAddr) + newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) } if newClient != nil { s.managerClients[nodeId] = newClient } } else { - 
s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) } } else { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := manager.NewGRPCClient(grpcAddr) + s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) } if newClient != nil { s.managerClients[nodeId] = newClient @@ -293,7 +267,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { // close nonexistent clients for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers[nodeId]; !exist { + if nodeConfig, exist := managers.Nodes[nodeId]; !exist { s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) diff --git a/go.mod b/go.mod index e9874b1..c987c7c 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/mosuka/blast go 1.12 require ( - github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89 // indirect github.com/blevesearch/bleve v0.7.0 github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect @@ 
-18,13 +17,13 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.3.0 github.com/gorilla/mux v1.7.0 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/golang-lru v0.5.1 // indirect github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa github.com/ikawaha/kagome.ipadic v1.0.1 // indirect github.com/imdario/mergo v0.3.7 github.com/jmhodges/levigo v1.0.0 // indirect @@ -38,7 +37,6 @@ require ( github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect github.com/stretchr/objx v0.1.1 - github.com/stretchr/testify v1.3.0 github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect diff --git a/go.sum b/go.sum index a38a8a7..91ed1f7 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89 h1:A1SPjPcl2LdF2Skv9Zt41jWu4XYQAyvBDzrveQjlkhQ= -github.com/armon/gomdb v0.0.0-20180202201627-75f545a47e89/go.mod h1:wSblbytRgcqD+U+gGCKz5145DyjUYPh5fqh2uyXxfZw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/blevesearch/bleve v0.7.0 h1:znyZ3zjsh2Scr60vszs7rbF29TU6i1q9bfnZf1vh0Ac= @@ -84,6 +82,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= @@ -113,8 +113,6 @@ github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 h1:bLsrEmB2NUwkHH18FOJBIa04wOV2RQalJrcafTYu6Lg= github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477/go.mod h1:aUF6HQr8+t3FC/ZHAC+pZreUBhTaxumuu3L+d37uRxk= -github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa h1:ccwcWyXHTaonH6yzx+t/3p9aNm/ogSTfd6YobZOtHmE= -github.com/hashicorp/raft-mdb v0.0.0-20180824152511-9ee9663b6ffa/go.mod h1:ooP3NrrH0GG/sVjF9pbRvhF6nVHRR4mkkwscLqReN1o= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ikawaha/kagome.ipadic v1.0.1 
h1:4c/tx3Rga6LvtTouEdvodcfeWWTttATZg8XIH8lRHG4= @@ -231,8 +229,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65 h1:hOY+O8MxdkPV10pNf7/XEHaySCiPKxixMKUshfHsGn0= -golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= diff --git a/hashutils/hashutils.go b/hashutils/hashutils.go new file mode 100644 index 0000000..2ac1911 --- /dev/null +++ b/hashutils/hashutils.go @@ -0,0 +1,32 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hashutils + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" +) + +func Hash(v interface{}) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + hb := sha256.Sum256(b) + + return hex.EncodeToString(hb[:]), nil +} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 20971a7..967572d 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -33,14 +33,13 @@ import ( "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type GRPCService struct { - //*grpc.Service - clusterConfig *config.ClusterConfig raftServer *RaftServer logger *zap.Logger @@ -53,7 +52,7 @@ type GRPCService struct { clusterChans map[chan index.GetClusterResponse]struct{} clusterMutex sync.RWMutex - managers map[string]interface{} + managers *management.Cluster managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} @@ -70,7 +69,7 @@ func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, cluster: make(map[string]interface{}, 0), clusterChans: make(map[chan index.GetClusterResponse]struct{}), - managers: make(map[string]interface{}, 0), + managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), }, nil } @@ -102,20 +101,14 @@ func (s *GRPCService) Stop() error { func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { var client *manager.GRPCClient - for id, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { + for id, node := range s.managers.Nodes { + if node.Metadata == nil { s.logger.Warn("assertion failed", zap.String("id", id)) continue } - state, ok := nm["state"].(string) - if !ok { - s.logger.Warn("missing state", 
zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() || state == raft.Follower.String() { + if node.Status == raft.Leader.String() || node.Status == raft.Follower.String() { + var ok bool client, ok = s.managerClients[id] if ok { return client, nil @@ -123,7 +116,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", state)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.Status)) } } @@ -133,7 +126,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (map[string]interface{}, error) { +func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() @@ -175,29 +168,21 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) // create clients for managers - for nodeId, node := range s.managers { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("id", nodeId)) + for nodeId, node := range s.managers.Nodes { + if node.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("id", nodeId)) continue } - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } - - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", 
nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := manager.NewGRPCClient(grpcAddr) + s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) } if client != nil { s.managerClients[nodeId] = client @@ -232,31 +217,18 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - - // get current manager cluster - managersIntr, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if managersIntr == nil { - s.logger.Error(err.Error()) - continue - } - managers := *managersIntr.(*map[string]interface{}) + managers := resp.Cluster if !reflect.DeepEqual(s.managers, managers) { // open clients - for nodeId, nodeConfig := range managers { - mm, ok := nodeConfig.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) + for nodeId, nodeConfig := range managers.Nodes { + if nodeConfig.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) continue } - grpcAddr, ok := mm["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if nodeConfig.Metadata.GrpcAddress == "" { + s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) continue } @@ -264,9 +236,9 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { if exist { s.logger.Debug("client has already exist in 
manager list", zap.String("id", nodeId)) - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if client.GetAddress() != nodeConfig.Metadata.GrpcAddress { + s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) delete(s.managerClients, nodeId) @@ -275,24 +247,24 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error(), zap.String("node_id", nodeId)) } - newClient, err := manager.NewGRPCClient(grpcAddr) + newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) } if newClient != nil { s.managerClients[nodeId] = newClient } } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) } } else { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := manager.NewGRPCClient(grpcAddr) + s.logger.Debug("create gRPC 
client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) } if newClient != nil { s.managerClients[nodeId] = newClient @@ -302,7 +274,7 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { // close nonexistent clients for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers[nodeId]; !exist { + if nodeConfig, exist := managers.Nodes[nodeId]; !exist { s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) @@ -480,6 +452,8 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // keep current peer nodes s.logger.Debug("current peers", zap.Any("peers", peers)) s.peers = peers + } else { + s.logger.Debug("there is no change in peers", zap.Any("peers", peers)) } // notify current cluster @@ -513,6 +487,8 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // keep current cluster s.logger.Debug("current cluster", zap.Any("cluster", cluster)) s.cluster = cluster + } else { + s.logger.Debug("there is no change in cluster", zap.Any("cluster", cluster)) } default: time.Sleep(100 * time.Millisecond) diff --git a/manager/grpc_client.go b/manager/grpc_client.go index cbb10c8..7fedf16 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -45,16 +45,6 @@ func NewGRPCContext() (context.Context, context.CancelFunc) { func NewGRPCClient(address string) (*GRPCClient, error) { ctx, cancel := NewGRPCContext() - //streamRetryOpts := []grpc_retry.CallOption{ - // 
grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - dialOpts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithDefaultCallOptions( @@ -95,28 +85,6 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -//func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { -// resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) -// if err != nil { -// st, _ := status.FromError(err) -// -// return management.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) -// } -// -// return resp.State.String(), nil -//} - -//func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { -// resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) -// if err != nil { -// st, _ := status.FromError(err) -// -// return management.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) -// } -// -// return resp.State.String(), nil -//} - func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { req := &management.NodeHealthCheckRequest{} @@ -141,7 +109,7 @@ func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (str return resp.State.String(), nil } -func (c *GRPCClient) NodeInfo(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) NodeInfo(id string, opts ...grpc.CallOption) (*management.Node, error) { req := &management.NodeInfoRequest{ Id: id, } @@ -153,30 +121,16 @@ func (c *GRPCClient) NodeInfo(id string, opts ...grpc.CallOption) (map[string]in return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.NodeConfig) - nodeConfig := *ins.(*map[string]interface{}) - - node := map[string]interface{}{ - "node_config": nodeConfig, - "state": resp.State, - } - - return node, nil + return resp.Node, nil } -func (c 
*GRPCClient) ClusterJoin(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { - nodeConfigAny := &any.Any{} - err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) - if err != nil { - return err - } - +func (c *GRPCClient) ClusterJoin(id string, node *management.Node, opts ...grpc.CallOption) error { req := &management.ClusterJoinRequest{ - Id: id, - NodeConfig: nodeConfigAny, + Id: id, + Node: node, } - _, err = c.client.ClusterJoin(c.ctx, req, opts...) + _, err := c.client.ClusterJoin(c.ctx, req, opts...) if err != nil { return err } @@ -197,7 +151,7 @@ func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { return nil } -func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*management.Cluster, error) { resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) @@ -205,10 +159,7 @@ func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (map[string]interface{ return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.Cluster) - cluster := *ins.(*map[string]interface{}) - - return cluster, nil + return resp.Cluster, nil } func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { diff --git a/manager/grpc_service.go b/manager/grpc_service.go index f9254df..67e5d66 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -17,15 +17,16 @@ package manager import ( "context" "errors" - "reflect" "strings" "sync" "time" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/hashutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" @@ -34,16 +35,14 @@ import ( ) type GRPCService 
struct { - //*grpc.Service - raftServer *RaftServer logger *zap.Logger updateClusterStopCh chan struct{} updateClusterDoneCh chan struct{} - peers map[string]interface{} + peers *management.Cluster peerClients map[string]*GRPCClient - cluster map[string]interface{} + cluster *management.Cluster clusterChans map[chan management.ClusterInfoResponse]struct{} clusterMutex sync.RWMutex @@ -56,9 +55,9 @@ func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, e raftServer: raftServer, logger: logger, - peers: make(map[string]interface{}, 0), + peers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, peerClients: make(map[string]*GRPCClient, 0), - cluster: make(map[string]interface{}, 0), + cluster: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, clusterChans: make(map[chan management.ClusterInfoResponse]struct{}), stateChans: make(map[chan management.WatchResponse]struct{}), @@ -82,14 +81,15 @@ func (s *GRPCService) Stop() error { func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { var client *GRPCClient - for id, node := range s.cluster { - state, ok := node.(map[string]interface{})["state"].(string) - if !ok { + for id, node := range s.cluster.Nodes { + state := node.Status + if node.Status == "" { s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) continue } if state == raft.Leader.String() { + var ok bool client, ok = s.peerClients[id] if ok { break @@ -121,6 +121,19 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { ticker := time.NewTicker(checkInterval) defer ticker.Stop() + // create initial cluster hash + clusterHash, err := hashutils.Hash(s.cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + + peersHash, err := hashutils.Hash(s.peers) + if err != nil { + s.logger.Error(err.Error()) + return + } + for { select { case <-s.updateClusterStopCh: @@ -133,25 +146,39 @@ func (s *GRPCService) startUpdateCluster(checkInterval 
time.Duration) { return } + // create latest cluster hash + newClusterHash, err := hashutils.Hash(cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create peer node list with out self node - peers := make(map[string]interface{}, 0) - for nodeId, node := range cluster { + peers := &management.Cluster{Nodes: make(map[string]*management.Node, 0)} + for nodeId, node := range cluster.Nodes { if nodeId != s.NodeID() { - peers[nodeId] = node + peers.Nodes[nodeId] = node } } - if !reflect.DeepEqual(s.peers, peers) { + // create latest peers hash + newPeersHash, err := hashutils.Hash(peers) + if err != nil { + s.logger.Error(err.Error()) + return + } + + // compare peers hash + //if !reflect.DeepEqual(s.peers, peers) { + if !cmp.Equal(peersHash, newPeersHash) { // open clients - for nodeId, nodeInfo := range peers { - nodeConfig, ok := nodeInfo.(map[string]interface{})["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId), zap.Any("node_info", nodeInfo)) + for nodeId, nodeInfo := range peers.Nodes { + if nodeInfo.Metadata == nil { + s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("metadata", nodeInfo.Metadata)) continue } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + if nodeInfo.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) continue } @@ -159,35 +186,30 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { if exist { s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - 
s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - + if client.GetAddress() != nodeInfo.Metadata.GrpcAddress { + s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) + s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) delete(s.peerClients, nodeId) - err = client.Close() if err != nil { s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) } - - newClient, err := NewGRPCClient(grpcAddr) + newClient, err := NewGRPCClient(nodeInfo.Metadata.GrpcAddress) if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) } - if newClient != nil { s.peerClients[nodeId] = newClient } } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) + s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) } } else { s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := NewGRPCClient(grpcAddr) + s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) + peerClient, err := NewGRPCClient(nodeInfo.Metadata.GrpcAddress) if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Warn(err.Error(), zap.String("node_id", nodeId), 
zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) } if peerClient != nil { s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) @@ -198,7 +220,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // close nonexistent clients for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers[nodeId]; !exist { + if nodeConfig, exist := peers.Nodes[nodeId]; !exist { s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) @@ -215,18 +237,15 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // keep current peer nodes s.logger.Debug("current peers", zap.Any("peers", peers)) s.peers = peers + } else { + s.logger.Debug("there is no change in peers", zap.Any("peers", peers)) } - // notify current cluster - if !reflect.DeepEqual(s.cluster, cluster) { - // convert to GetClusterResponse for channel output - clusterResp := &management.ClusterInfoResponse{} - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Warn(err.Error()) + // compare cluster hash + if !cmp.Equal(clusterHash, newClusterHash) { + clusterResp := &management.ClusterInfoResponse{ + Cluster: cluster, } - clusterResp.Cluster = clusterAny // output to channel for c := range s.clusterChans { @@ -235,7 +254,10 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // keep current cluster s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - s.cluster = cluster + // TODO: overwrite cluster hash + clusterHash = newClusterHash + } else { + s.logger.Debug("there is no change in cluster", zap.Any("cluster", cluster)) } default: time.Sleep(100 * time.Millisecond) @@ -282,39 +304,41 @@ func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } 
-func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - return map[string]interface{}{ - "node_config": s.raftServer.nodeConfig.ToMap(), - "state": s.raftServer.State().String(), - }, nil +func (s *GRPCService) getSelfNode() (*management.Node, error) { + node := s.raftServer.node + node.Status = s.raftServer.State().String() + + return node, nil } -func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} +func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { + var nodeInfo *management.Node var err error if peerClient, exist := s.peerClients[id]; exist { nodeInfo, err = peerClient.NodeInfo(id) if err != nil { s.logger.Warn(err.Error()) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), + nodeInfo = &management.Node{ + BindAddress: "", + Status: raft.Shutdown.String(), + Metadata: &management.Metadata{}, } } } else { s.logger.Warn("node does not exist in peer list", zap.String("id", id)) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), + nodeInfo = &management.Node{ + BindAddress: "", + Status: raft.Shutdown.String(), + Metadata: &management.Metadata{}, } } return nodeInfo, nil } -func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} +func (s *GRPCService) getNode(id string) (*management.Node, error) { + var nodeInfo *management.Node var err error if id == "" || id == s.NodeID() { @@ -340,30 +364,12 @@ func (s *GRPCService) NodeInfo(ctx context.Context, req *management.NodeInfoRequ return resp, status.Error(codes.Internal, err.Error()) } - nodeConfigAny := &any.Any{} - if nodeConfig, exist := nodeInfo["node_config"]; exist { - err = protobuf.UnmarshalAny(nodeConfig.(map[string]interface{}), nodeConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.Internal, err.Error()) - } - } else { - s.logger.Error("missing node_config", zap.Any("node_config", nodeConfig)) - } - - state, exist := nodeInfo["state"].(string) - if !exist { - s.logger.Error("missing node state", zap.String("state", state)) - state = raft.Shutdown.String() - } - - resp.NodeConfig = nodeConfigAny - resp.State = state + resp.Node = nodeInfo return resp, nil } -func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) error { +func (s *GRPCService) setNode(id string, nodeConfig *management.Node) error { if s.raftServer.IsLeader() { err := s.raftServer.SetNode(id, nodeConfig) if err != nil { @@ -390,15 +396,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) { resp := &empty.Empty{} - ins, err := protobuf.MarshalAny(req.NodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfig := *ins.(*map[string]interface{}) - - err = s.setNode(req.Id, nodeConfig) + err := s.setNode(req.Id, req.Node) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -443,26 +441,21 @@ func (s *GRPCService) ClusterLeave(ctx context.Context, req *management.ClusterL return resp, nil } -func (s *GRPCService) getCluster() (map[string]interface{}, error) { +func (s *GRPCService) getCluster() (*management.Cluster, error) { cluster, err := s.raftServer.GetCluster() if err != nil { s.logger.Error(err.Error()) return nil, err } - // update node state - for nodeId := range cluster { + // update latest node state + for nodeId := range cluster.Nodes { node, err := s.getNode(nodeId) if err != nil { - s.logger.Error(err.Error()) - } - state := node["state"].(string) - - if _, ok := cluster[nodeId]; !ok { - cluster[nodeId] = map[string]interface{}{} + s.logger.Warn(err.Error()) + 
continue } - nodeInfo := cluster[nodeId].(map[string]interface{}) - nodeInfo["state"] = state + cluster.Nodes[nodeId].Status = node.Status } return cluster, nil @@ -477,14 +470,7 @@ func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*manag return resp, status.Error(codes.Internal, err.Error()) } - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = clusterAny + resp.Cluster = cluster return resp, nil } diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index d918e62..75ceb1a 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/maputils" + "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" ) @@ -31,10 +32,11 @@ type RaftFSM struct { path string logger *zap.Logger - metadata maputils.Map - metadataMutex sync.RWMutex + cluster *management.Cluster + clusterMutex sync.RWMutex - data maputils.Map + data maputils.Map + dataMutex sync.RWMutex } func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { @@ -46,7 +48,7 @@ func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { func (f *RaftFSM) Start() error { f.logger.Info("initialize metadata") - f.metadata = maputils.Map{} + f.cluster = &management.Cluster{Nodes: make(map[string]*management.Node, 0)} f.logger.Info("initialize store data") f.data = maputils.Map{} @@ -58,45 +60,37 @@ func (f *RaftFSM) Stop() error { return nil } -func (f *RaftFSM) GetNodeConfig(nodeId string) (map[string]interface{}, error) { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) GetNodeConfig(nodeId string) (*management.Node, error) { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - nodeConfig, err := f.metadata.Get(nodeId) - if err != nil { - 
f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - if err == maputils.ErrNotFound { - return nil, blasterrors.ErrNotFound - } - return nil, err + node, ok := f.cluster.Nodes[nodeId] + if !ok { + return nil, blasterrors.ErrNotFound } - return nodeConfig.(maputils.Map).ToMap(), nil + return node, nil } -func (f *RaftFSM) SetNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) SetNodeConfig(nodeId string, node *management.Node) error { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Merge(nodeId, nodeConfig) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - return err - } + f.cluster.Nodes[nodeId] = node return nil } func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Delete(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - return err + if _, ok := f.cluster.Nodes[nodeId]; !ok { + return blasterrors.ErrNotFound } + delete(f.cluster.Nodes, nodeId) + return nil } @@ -178,7 +172,22 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNodeConfig(data["node_id"].(string), data["node_config"].(map[string]interface{})) + b, err := json.Marshal(data["node"]) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + var node *management.Node + err = json.Unmarshal(b, &node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + err = f.SetNodeConfig(data["node_id"].(string), node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } return &fsmResponse{error: err} case deleteNode: var data map[string]interface{} diff --git 
a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index 8bb2c97..1b6a243 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -20,7 +20,9 @@ import ( "reflect" "testing" + "github.com/hashicorp/raft" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf/management" ) func TestRaftFSM_GetNode(t *testing.T) { @@ -52,32 +54,54 @@ func TestRaftFSM_GetNode(t *testing.T) { t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) + _ = fsm.SetNodeConfig( + "node1", + &management.Node{ + BindAddress: "2100", + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5100", + HttpAddress: "8100", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node2", + &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node3", + &management.Node{ + BindAddress: "2120", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5120", + HttpAddress: "8120", + }, + }, + ) val1, err := fsm.GetNodeConfig("node2") if err != nil { t.Fatalf("%v", err) } - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", + exp1 := &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, } + act1 := val1 if !reflect.DeepEqual(exp1, act1) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) @@ -114,53 +138,82 @@ func TestRaftFSM_SetNode(t *testing.T) { 
t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) + _ = fsm.SetNodeConfig( + "node1", + &management.Node{ + BindAddress: "2100", + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5100", + HttpAddress: "8100", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node2", + &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node3", + &management.Node{ + BindAddress: "2120", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5120", + HttpAddress: "8120", + }, + }, + ) val1, err := fsm.GetNodeConfig("node2") if err != nil { t.Fatalf("%v", err) } - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", + exp1 := &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, } act1 := val1 if !reflect.DeepEqual(exp1, act1) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) } - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - "leader": true, - }) + _ = fsm.SetNodeConfig( + "node2", + &management.Node{ + BindAddress: "2110", + Status: raft.Shutdown.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, + }, + ) val2, err := fsm.GetNodeConfig("node2") if err != nil { t.Fatalf("%v", err) } - exp2 := map[string]interface{}{ - 
"bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - "leader": true, + exp2 := &management.Node{ + BindAddress: "2110", + Status: raft.Shutdown.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, } + act2 := val2 if !reflect.DeepEqual(exp2, act2) { t.Fatalf("expected content to see %v, saw %v", exp2, act2) @@ -196,30 +249,51 @@ func TestRaftFSM_DeleteNode(t *testing.T) { t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig("node1", map[string]interface{}{ - "bind_addr": ":16060", - "grpc_addr": ":17070", - "http_addr": ":18080", - }) - _ = fsm.SetNodeConfig("node2", map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", - }) - _ = fsm.SetNodeConfig("node3", map[string]interface{}{ - "bind_addr": ":16062", - "grpc_addr": ":17072", - "http_addr": ":18082", - }) + _ = fsm.SetNodeConfig( + "node1", + &management.Node{ + BindAddress: "2100", + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5100", + HttpAddress: "8100", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node2", + &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, + }, + ) + _ = fsm.SetNodeConfig( + "node3", + &management.Node{ + BindAddress: "2120", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5120", + HttpAddress: "8120", + }, + }, + ) val1, err := fsm.GetNodeConfig("node2") if err != nil { t.Fatalf("%v", err) } - exp1 := map[string]interface{}{ - "bind_addr": ":16061", - "grpc_addr": ":17071", - "http_addr": ":18081", + exp1 := &management.Node{ + BindAddress: "2110", + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: "5110", + HttpAddress: "8110", + }, } act1 := val1 if !reflect.DeepEqual(exp1, act1) { @@ -340,9 +414,6 @@ func TestRaftFSM_Set(t *testing.T) { _ = fsm.SetValue("/", 
map[string]interface{}{ "a": "A", }, true) - if err != nil { - t.Fatalf("%v", err) - } val2, err := fsm.GetValue("/") if err != nil { t.Fatalf("%v", err) diff --git a/manager/raft_server.go b/manager/raft_server.go index fe29955..b998de7 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -30,34 +30,41 @@ import ( _ "github.com/mosuka/blast/builtins" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" ) type RaftServer struct { - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger + nodeId string + node *management.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + bootstrap bool + logger *zap.Logger raft *raft.Raft fsm *RaftFSM mu sync.RWMutex } -func NewRaftServer(nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(nodeId string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - nodeConfig: nodeConfig, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, + nodeId: nodeId, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + bootstrap: bootstrap, + logger: logger, }, nil } func (s *RaftServer) Start() error { var err error - fsmPath := filepath.Join(s.nodeConfig.DataDir, "store") + fsmPath := filepath.Join(s.dataDir, "store") s.logger.Info("create finite state machine", zap.String("path", fsmPath)) s.fsm, err = NewRaftFSM(fsmPath, s.logger) if err != nil { @@ -72,27 +79,27 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", zap.String("node_id", s.nodeConfig.NodeId)) + s.logger.Info("create Raft config", 
zap.String("node_id", s.nodeId)) raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeConfig.NodeId) + raftConfig.LocalID = raft.ServerID(s.nodeId) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.nodeConfig.BindAddr)) - addr, err := net.ResolveTCPAddr("tcp", s.nodeConfig.BindAddr) + s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) + addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) if err != nil { s.logger.Fatal(err.Error()) return err } - s.logger.Info("create TCP transport", zap.String("bind_addr", s.nodeConfig.BindAddr)) - transport, err := raft.NewTCPTransport(s.nodeConfig.BindAddr, addr, 3, 10*time.Second, ioutil.Discard) + s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) + transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err } - snapshotPath := s.nodeConfig.DataDir + snapshotPath := s.dataDir s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) if err != nil { @@ -103,10 +110,10 @@ func (s *RaftServer) Start() error { s.logger.Info("create Raft machine") var logStore raft.LogStore var stableStore raft.StableStore - switch s.nodeConfig.RaftStorageType { + switch s.raftStorageType { case "boltdb": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { 
s.logger.Fatal(err.Error()) @@ -117,8 +124,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -126,8 +133,8 @@ func (s *RaftServer) Start() error { return err } case "badger": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -138,8 +145,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -151,8 +158,8 @@ func (s *RaftServer) Start() error { return err } default: - 
logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -163,8 +170,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -200,8 +207,8 @@ func (s *RaftServer) Start() error { } // set node config - s.logger.Info("register its own node config", zap.String("node_id", s.nodeConfig.NodeId), zap.Any("node_config", s.nodeConfig)) - err = s.setNodeConfig(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + s.logger.Info("register its own node config", zap.String("node_id", s.nodeId), zap.Any("node", s.node)) + err = s.setNodeConfig(s.nodeId, s.node) if err != nil { s.logger.Fatal(err.Error()) return err @@ -285,7 +292,7 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } func (s *RaftServer) NodeID() string { - return s.nodeConfig.NodeId + return s.nodeId } func (s *RaftServer) Stats() map[string]string { @@ -310,7 +317,7 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { return 
nil } -func (s *RaftServer) getNodeConfig(nodeId string) (map[string]interface{}, error) { +func (s *RaftServer) getNodeConfig(nodeId string) (*management.Node, error) { nodeConfig, err := s.fsm.GetNodeConfig(nodeId) if err != nil { s.logger.Error(err.Error()) @@ -320,12 +327,12 @@ func (s *RaftServer) getNodeConfig(nodeId string) (map[string]interface{}, error return nodeConfig, nil } -func (s *RaftServer) setNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) setNodeConfig(nodeId string, node *management.Node) error { msg, err := newMessage( setNode, map[string]interface{}{ - "node_id": nodeId, - "node_config": nodeConfig, + "node_id": nodeId, + "node": node, }, ) if err != nil { @@ -387,7 +394,7 @@ func (s *RaftServer) deleteNodeConfig(nodeId string) error { return nil } -func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { +func (s *RaftServer) GetNode(id string) (*management.Node, error) { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { @@ -395,15 +402,14 @@ func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { return nil, err } - node := make(map[string]interface{}, 0) + var node *management.Node for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - nodeConfig, err := s.getNodeConfig(id) + node, err = s.getNodeConfig(id) if err != nil { s.logger.Error(err.Error()) return nil, err } - node["node_config"] = nodeConfig break } } @@ -411,7 +417,7 @@ func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { return node, nil } -func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) SetNode(nodeId string, node *management.Node) error { if !s.IsLeader() { s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader @@ -431,15 +437,15 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e 
} } - bindAddr, ok := nodeConfig["bind_addr"].(string) - if !ok { - s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) - return errors.New("missing metadata") + if node.BindAddress == "" { + err = errors.New("missing bind address") + s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) + return err } // add node to Raft cluster - s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.String("address", bindAddr)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(bindAddr), 0, 0) + s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.Any("node", node)) + f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(node.BindAddress), 0, 0) err = f.Error() if err != nil { s.logger.Error(err.Error()) @@ -447,7 +453,7 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e } // set node config - err = s.setNodeConfig(nodeId, nodeConfig) + err = s.setNodeConfig(nodeId, node) if err != nil { s.logger.Error(err.Error()) return err @@ -458,7 +464,7 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e func (s *RaftServer) DeleteNode(nodeId string) error { if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } @@ -492,7 +498,7 @@ func (s *RaftServer) DeleteNode(nodeId string) error { return nil } -func (s *RaftServer) GetCluster() (map[string]interface{}, error) { +func (s *RaftServer) GetCluster() (*management.Cluster, error) { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { @@ -500,14 +506,15 @@ func (s *RaftServer) GetCluster() (map[string]interface{}, error) { return nil, err } - cluster := map[string]interface{}{} + cluster := &management.Cluster{Nodes: make(map[string]*management.Node, 0)} for _, server := range cf.Configuration().Servers { node, err := 
s.GetNode(string(server.ID)) if err != nil { s.logger.Warn(err.Error()) - node = map[string]interface{}{} + continue } - cluster[string(server.ID)] = node + + cluster.Nodes[string(server.ID)] = node } return cluster, nil diff --git a/manager/server.go b/manager/server.go index b7ebeb1..bb36b92 100644 --- a/manager/server.go +++ b/manager/server.go @@ -17,16 +17,20 @@ package manager import ( accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" + "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" ) type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger + peerGrpcAddr string + nodeId string + node *management.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + logger *zap.Logger + grpcLogger *zap.Logger + httpLogger accesslog.Logger raftServer *RaftServer grpcService *GRPCService @@ -35,14 +39,17 @@ type Server struct { httpServer *HTTPServer } -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(peerGrpcAddr string, nodeId string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, + peerGrpcAddr: peerGrpcAddr, + nodeId: nodeId, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + logger: logger, + grpcLogger: grpcLogger, + httpLogger: httpLogger, }, nil } @@ -50,11 +57,11 @@ func (s *Server) Start() { var err error // bootstrap node? 
- bootstrap := s.clusterConfig.PeerAddr == "" + bootstrap := s.peerGrpcAddr == "" s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) // create raft server - s.raftServer, err = NewRaftServer(s.nodeConfig, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.nodeId, s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -68,21 +75,21 @@ func (s *Server) Start() { } // create gRPC server - s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) + s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP server - s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Error(err.Error()) return @@ -124,7 +131,7 @@ func (s *Server) Start() { // join to the existing cluster if !bootstrap { - client, err := NewGRPCClient(s.clusterConfig.PeerAddr) + client, err := NewGRPCClient(s.peerGrpcAddr) defer func() { err := client.Close() if err != nil { @@ -136,7 +143,7 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + err = client.ClusterJoin(s.nodeId, s.node) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/manager/server_test.go b/manager/server_test.go index b4bb963..e5dae37 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -15,6 +15,7 @@ package manager import ( + "fmt" "os" "path/filepath" "reflect" @@ -22,43 +23,44 @@ import ( "time" "github.com/hashicorp/raft" - 
"github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" + "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" ) func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -78,32 +80,34 @@ func TestServer_Start(t *testing.T) { func TestServer_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, 
false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -120,7 +124,7 @@ func TestServer_HealthCheck(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -170,32 +174,34 @@ func TestServer_HealthCheck(t *testing.T) { func TestServer_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access 
logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -212,7 +218,7 @@ func TestServer_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -226,13 +232,17 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo(nodeConfig.NodeId) + nodeInfo, err := client.NodeInfo(nodeId) if err != nil { t.Fatalf("%v", err) } - expNodeInfo := map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expNodeInfo := &management.Node{ + BindAddress: bindAddress, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + 
GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, } actNodeInfo := nodeInfo if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { @@ -243,32 +253,34 @@ func TestServer_GetNode(t *testing.T) { func TestServer_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -285,7 +297,7 @@ func TestServer_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = 
client.Close() @@ -303,10 +315,16 @@ func TestServer_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expCluster := map[string]interface{}{ - nodeConfig.NodeId: map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expCluster := &management.Cluster{ + Nodes: map[string]*management.Node{ + nodeId: { + BindAddress: bindAddress, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, }, } actCluster := cluster @@ -315,259 +333,37 @@ func TestServer_GetCluster(t *testing.T) { } } -func TestServer_GetIndexMapping(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexMapping := indexConfig.IndexMapping - if err != nil { - 
t.Fatalf("%v", err) - } - - actIntr, err := client.Get("index_config/index_mapping") - if err != nil { - t.Fatalf("%v", err) - } - - actIndexMapping, err := indexutils.NewIndexMappingFromMap(*actIntr.(*map[string]interface{})) - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Fatalf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) - } -} - -func TestServer_GetIndexType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexType := indexConfig.IndexType - if err != nil { - t.Fatalf("%v", err) - } - - actIndexType, err := client.Get("index_config/index_type") - if err != nil { - t.Fatalf("%v", err) - } - - if expIndexType != *actIndexType.(*string) { - 
t.Fatalf("expected content to see %v, saw %v", expIndexType, *actIndexType.(*string)) - } -} - -func TestServer_GetIndexStorageType(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) - } - - // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStorageType := indexConfig.IndexStorageType - if err != nil { - t.Fatalf("%v", err) - } - - actIndexStorageType, err := client.Get("index_config/index_storage_type") - if err != nil { - t.Fatalf("%v", err) - } - - if expIndexStorageType != *actIndexStorageType.(*string) { - t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, *actIndexStorageType.(*string)) - } -} - func TestServer_SetState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger 
:= logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -584,7 +380,7 @@ func TestServer_SetState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -621,32 +417,34 @@ func TestServer_SetState(t *testing.T) { func TestServer_GetState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := 
logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -663,7 +461,7 @@ func TestServer_GetState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -700,32 +498,34 @@ func TestServer_GetState(t *testing.T) { func TestServer_DeleteState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := 
config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + raftStorageType := "boltdb" + + node := &management.Node{ + BindAddress: bindAddress, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } // create server - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -742,7 +542,7 @@ func TestServer_DeleteState(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -800,29 +600,34 @@ func TestServer_DeleteState(t *testing.T) { func TestCluster_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + 
peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -831,18 +636,34 @@ func TestCluster_Start(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := 
fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -851,18 +672,34 @@ func TestCluster_Start(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, 
dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -871,7 +708,8 @@ func TestCluster_Start(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep @@ -881,29 +719,34 @@ func TestCluster_Start(t *testing.T) { func TestCluster_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, 
grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -912,18 +755,34 @@ func TestCluster_HealthCheck(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -932,18 +791,34 @@ func TestCluster_HealthCheck(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, 
nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -952,28 +827,29 @@ func TestCluster_HealthCheck(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1085,29 +961,34 @@ func TestCluster_HealthCheck(t *testing.T) { func TestCluster_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger 
grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1116,18 +997,34 @@ func TestCluster_GetNode(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := 
NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1136,18 +1033,34 @@ func TestCluster_GetNode(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: 
&management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1156,28 +1069,29 @@ func TestCluster_GetNode(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1186,117 +1100,153 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo(nodeConfig1.NodeId) + node11, err := client1.NodeInfo(nodeId1) if err != nil { t.Fatalf("%v", err) } - expNode11 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode11 := &management.Node{ + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, } actNode11 := node11 if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node12, err := client1.NodeInfo(nodeConfig2.NodeId) + node12, err 
:= client1.NodeInfo(nodeId2) if err != nil { t.Fatalf("%v", err) } - expNode12 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode12 := &management.Node{ + BindAddress: bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, } actNode12 := node12 if !reflect.DeepEqual(expNode12, actNode12) { t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) } - node13, err := client1.NodeInfo(nodeConfig3.NodeId) + node13, err := client1.NodeInfo(nodeId3) if err != nil { t.Fatalf("%v", err) } - expNode13 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode13 := &management.Node{ + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, } actNode13 := node13 if !reflect.DeepEqual(expNode13, actNode13) { t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) } - node21, err := client2.NodeInfo(nodeConfig1.NodeId) + node21, err := client2.NodeInfo(nodeId1) if err != nil { t.Fatalf("%v", err) } - expNode21 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode21 := &management.Node{ + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node22, err := client2.NodeInfo(nodeConfig2.NodeId) + node22, err := client2.NodeInfo(nodeId2) if err != nil { t.Fatalf("%v", err) } - expNode22 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode22 := &management.Node{ + BindAddress: 
bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, } actNode22 := node22 if !reflect.DeepEqual(expNode22, actNode22) { t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) } - node23, err := client2.NodeInfo(nodeConfig3.NodeId) + node23, err := client2.NodeInfo(nodeId3) if err != nil { t.Fatalf("%v", err) } - expNode23 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode23 := &management.Node{ + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, } actNode23 := node23 if !reflect.DeepEqual(expNode23, actNode23) { t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) } - node31, err := client3.NodeInfo(nodeConfig1.NodeId) + node31, err := client3.NodeInfo(nodeId1) if err != nil { t.Fatalf("%v", err) } - expNode31 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode31 := &management.Node{ + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, } actNode31 := node31 if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } - node32, err := client3.NodeInfo(nodeConfig2.NodeId) + node32, err := client3.NodeInfo(nodeId2) if err != nil { t.Fatalf("%v", err) } - expNode32 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode32 := &management.Node{ + BindAddress: bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, } actNode32 := node32 if !reflect.DeepEqual(expNode32, actNode32) { t.Fatalf("expected content to 
see %v, saw %v", expNode32, actNode32) } - node33, err := client3.NodeInfo(nodeConfig3.NodeId) + node33, err := client3.NodeInfo(nodeId3) if err != nil { t.Fatalf("%v", err) } - expNode33 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), + expNode33 := &management.Node{ + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, } actNode33 := node33 if !reflect.DeepEqual(expNode33, actNode33) { @@ -1307,29 +1257,34 @@ func TestCluster_GetNode(t *testing.T) { func TestCluster_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create 
server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1338,18 +1293,34 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1358,18 +1329,34 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - 
clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1378,28 +1365,29 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := 
NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1412,18 +1400,32 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expCluster1 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + expCluster1 := &management.Cluster{ + Nodes: map[string]*management.Node{ + nodeId1: { + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + BindAddress: bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster1 := cluster1 @@ -1435,18 +1437,32 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expCluster2 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + expCluster2 := &management.Cluster{ + Nodes: map[string]*management.Node{ + nodeId1: { + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + 
nodeId2: { + BindAddress: bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster2 := cluster2 @@ -1458,18 +1474,32 @@ func TestCluster_GetCluster(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expCluster3 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + expCluster3 := &management.Cluster{ + Nodes: map[string]*management.Node{ + nodeId1: { + BindAddress: bindAddress1, + Status: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + BindAddress: bindAddress2, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + BindAddress: bindAddress3, + Status: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster3 := cluster3 @@ -1478,32 +1508,37 @@ func TestCluster_GetCluster(t *testing.T) { } } -func TestCluster_GetState(t *testing.T) { +func TestCluster_SetState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // 
create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1512,18 +1547,34 @@ func TestCluster_GetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", 
testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1532,18 +1583,34 @@ func TestCluster_GetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), 
"upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1552,28 +1619,29 @@ func TestCluster_GetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1581,64 +1649,143 @@ func TestCluster_GetState(t *testing.T) { t.Fatalf("%v", err) } - // get index mapping from all nodes - indexConfig1, err := client1.Get("index_config") + err = client1.Set("test/key1", "val1") + if err != nil { + t.Fatalf("%v", err) + } + time.Sleep(2 * time.Second) // wait for data to propagate + + // get value from all nodes + val11, err := client1.Get("test/key1") + if err != nil { + t.Fatalf("%v", err) + } + expVal11 := "val1" + actVal11 := *val11.(*string) + if expVal11 != actVal11 { + t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) + } + val21, err := client2.Get("test/key1") + if err != nil { + t.Fatalf("%v", err) + } + expVal21 := "val1" + actVal21 := *val21.(*string) + if expVal21 != actVal21 { + t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) + } + val31, err := client3.Get("test/key1") if err != nil { t.Fatalf("%v", err) } - 
expIndexConfig1 := indexConfig.ToMap() - actIndexConfig1 := *indexConfig1.(*map[string]interface{}) - if !reflect.DeepEqual(expIndexConfig1, actIndexConfig1) { - t.Fatalf("expected content to see %v, saw %v", expIndexConfig1, actIndexConfig1) + expVal31 := "val1" + actVal31 := *val31.(*string) + if expVal31 != actVal31 { + t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) + } + + err = client2.Set("test/key2", "val2") + if err != nil { + t.Fatalf("%v", err) } + time.Sleep(2 * time.Second) // wait for data to propagate - indexConfig2, err := client2.Get("index_config") + // get value from all nodes + val12, err := client1.Get("test/key2") + if err != nil { + t.Fatalf("%v", err) + } + expVal12 := "val2" + actVal12 := *val12.(*string) + if expVal12 != actVal12 { + t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) + } + val22, err := client2.Get("test/key2") + if err != nil { + t.Fatalf("%v", err) + } + expVal22 := "val2" + actVal22 := *val22.(*string) + if expVal22 != actVal22 { + t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) + } + val32, err := client3.Get("test/key2") if err != nil { t.Fatalf("%v", err) } - expIndexConfig2 := indexConfig.ToMap() - actIndexConfig2 := *indexConfig2.(*map[string]interface{}) - if !reflect.DeepEqual(expIndexConfig2, actIndexConfig2) { - t.Fatalf("expected content to see %v, saw %v", expIndexConfig2, actIndexConfig2) + expVal32 := "val2" + actVal32 := *val32.(*string) + if expVal32 != actVal32 { + t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) + } + + err = client3.Set("test/key3", "val3") + if err != nil { + t.Fatalf("%v", err) } + time.Sleep(2 * time.Second) // wait for data to propagate - indexConfig3, err := client3.Get("index_config") + // get value from all nodes + val13, err := client1.Get("test/key3") if err != nil { t.Fatalf("%v", err) } - expIndexConfig3 := indexConfig.ToMap() - actIndexConfig3 := *indexConfig3.(*map[string]interface{}) - if 
!reflect.DeepEqual(expIndexConfig3, actIndexConfig3) { - t.Fatalf("expected content to see %v, saw %v", expIndexConfig3, actIndexConfig3) + expVal13 := "val3" + actVal13 := *val13.(*string) + if expVal13 != actVal13 { + t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) + } + val23, err := client2.Get("test/key3") + if err != nil { + t.Fatalf("%v", err) + } + expVal23 := "val3" + actVal23 := *val23.(*string) + if expVal23 != actVal23 { + t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) + } + val33, err := client3.Get("test/key3") + if err != nil { + t.Fatalf("%v", err) + } + expVal33 := "val3" + actVal33 := *val33.(*string) + if expVal33 != actVal33 { + t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } } -func TestCluster_SetState(t *testing.T) { +func TestCluster_GetState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create 
configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1647,18 +1794,34 @@ func TestCluster_SetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ 
-1667,18 +1830,34 @@ func TestCluster_SetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1687,28 +1866,29 @@ func TestCluster_SetState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := 
NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1825,29 +2005,34 @@ func TestCluster_SetState(t *testing.T) { func TestCluster_DeleteState(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + raftStorageType1 := "boltdb" + + node1 := &management.Node{ + BindAddress: bindAddress1, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger, httpAccessLogger) + // create server + server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, 
httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1856,18 +2041,34 @@ func TestCluster_DeleteState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("manager2"), grpcLogger, httpAccessLogger) + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + raftStorageType2 := "boltdb" + + node2 := &management.Node{ + BindAddress: bindAddress2, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1876,18 +2077,34 @@ func TestCluster_DeleteState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, 
nodeConfig3, config.DefaultIndexConfig(), logger.Named("manager3"), grpcLogger, httpAccessLogger) + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + raftStorageType3 := "boltdb" + + node3 := &management.Node{ + BindAddress: bindAddress3, + Status: "", + Metadata: &management.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1896,28 +2113,29 @@ func TestCluster_DeleteState(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 5c7a100..8125b30 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go 
@@ -114,7 +114,7 @@ func (x WatchResponse_Command) String() string { } func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12, 0} + return fileDescriptor_5e030ad796566078, []int{15, 0} } type NodeHealthCheckRequest struct { @@ -196,6 +196,147 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { } // use for raft +type Metadata struct { + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{2} +} + +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetGrpcAddress() string { + if m != nil { + return m.GrpcAddress + } + return "" +} + +func (m *Metadata) GetHttpAddress() string { + if m != nil { + return m.HttpAddress + } + return "" +} + +type Node struct { + BindAddress string `protobuf:"bytes,1,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` + Status string 
`protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Metadata *Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{3} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetBindAddress() string { + if m != nil { + return m.BindAddress + } + return "" +} + +func (m *Node) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *Node) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Cluster struct { + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{4} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetNodes() map[string]*Node { + if m != nil { + return m.Nodes + } + return nil +} + type NodeInfoRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -207,7 +348,7 @@ func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} } func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) } func (*NodeInfoRequest) ProtoMessage() {} func (*NodeInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{2} + return fileDescriptor_5e030ad796566078, []int{5} } func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error { @@ -236,8 +377,7 @@ func (m *NodeInfoRequest) GetId() string { } type NodeInfoResponse struct { - NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -247,7 +387,7 @@ func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } func (*NodeInfoResponse) ProtoMessage() {} func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{3} + return fileDescriptor_5e030ad796566078, []int{6} } 
func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { @@ -268,23 +408,16 @@ func (m *NodeInfoResponse) XXX_DiscardUnknown() { var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo -func (m *NodeInfoResponse) GetNodeConfig() *any.Any { +func (m *NodeInfoResponse) GetNode() *Node { if m != nil { - return m.NodeConfig + return m.Node } return nil } -func (m *NodeInfoResponse) GetState() string { - if m != nil { - return m.State - } - return "" -} - type ClusterJoinRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - NodeConfig *any.Any `protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -294,7 +427,7 @@ func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } func (*ClusterJoinRequest) ProtoMessage() {} func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{4} + return fileDescriptor_5e030ad796566078, []int{7} } func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { @@ -322,9 +455,9 @@ func (m *ClusterJoinRequest) GetId() string { return "" } -func (m *ClusterJoinRequest) GetNodeConfig() *any.Any { +func (m *ClusterJoinRequest) GetNode() *Node { if m != nil { - return m.NodeConfig + return m.Node } return nil } @@ -340,7 +473,7 @@ func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } func (*ClusterLeaveRequest) ProtoMessage() {} func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{5} + return fileDescriptor_5e030ad796566078, []int{8} } func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { @@ -369,7 +502,7 
@@ func (m *ClusterLeaveRequest) GetId() string { } type ClusterInfoResponse struct { - Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -379,7 +512,7 @@ func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } func (*ClusterInfoResponse) ProtoMessage() {} func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{6} + return fileDescriptor_5e030ad796566078, []int{9} } func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { @@ -400,7 +533,7 @@ func (m *ClusterInfoResponse) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo -func (m *ClusterInfoResponse) GetCluster() *any.Any { +func (m *ClusterInfoResponse) GetCluster() *Cluster { if m != nil { return m.Cluster } @@ -418,7 +551,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{7} + return fileDescriptor_5e030ad796566078, []int{10} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { @@ -457,7 +590,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{8} + return fileDescriptor_5e030ad796566078, []int{11} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { @@ -497,7 +630,7 @@ func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { 
return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9} + return fileDescriptor_5e030ad796566078, []int{12} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { @@ -543,7 +676,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} + return fileDescriptor_5e030ad796566078, []int{13} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -582,7 +715,7 @@ func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (m *WatchRequest) String() string { return proto.CompactTextString(m) } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} + return fileDescriptor_5e030ad796566078, []int{14} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { @@ -623,7 +756,7 @@ func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} + return fileDescriptor_5e030ad796566078, []int{15} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { @@ -671,6 +804,10 @@ func init() { proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") + proto.RegisterType((*Metadata)(nil), "management.Metadata") + proto.RegisterType((*Node)(nil), "management.Node") + proto.RegisterType((*Cluster)(nil), 
"management.Cluster") + proto.RegisterMapType((map[string]*Node)(nil), "management.Cluster.NodesEntry") proto.RegisterType((*NodeInfoRequest)(nil), "management.NodeInfoRequest") proto.RegisterType((*NodeInfoResponse)(nil), "management.NodeInfoResponse") proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") @@ -689,52 +826,61 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 719 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xff, 0x6f, 0xd2, 0x40, - 0x1c, 0xa5, 0x65, 0x05, 0xf6, 0x61, 0x5f, 0x9a, 0x73, 0xd9, 0x17, 0x66, 0xe6, 0x56, 0x35, 0x99, - 0x2e, 0x16, 0x33, 0x35, 0x6a, 0xfc, 0x8a, 0xd0, 0x0c, 0x36, 0xec, 0x4c, 0xcb, 0x5c, 0xb6, 0x18, - 0x4d, 0x81, 0x1b, 0x90, 0xd1, 0x1e, 0xae, 0xc7, 0x92, 0xfd, 0x15, 0xfa, 0x97, 0xf8, 0xab, 0xff, - 0x9e, 0x69, 0xaf, 0x85, 0x1b, 0xb4, 0xdd, 0x12, 0x7f, 0xe3, 0xee, 0xde, 0x7b, 0x9f, 0xf7, 0xae, - 0x7d, 0x05, 0x1e, 0x0c, 0x2e, 0x08, 0x25, 0xcd, 0xe1, 0x59, 0xd1, 0xb6, 0x1c, 0xab, 0x83, 0x6d, - 0xec, 0x50, 0xee, 0xa7, 0xea, 0x1f, 0x23, 0x18, 0xef, 0x14, 0xd6, 0x3a, 0x84, 0x74, 0xfa, 0xb8, - 0x38, 0x22, 0x5a, 0xce, 0x15, 0x83, 0x15, 0xd6, 0x27, 0x8f, 0xb0, 0x3d, 0xa0, 0xc1, 0xa1, 0xf2, - 0x4b, 0x80, 0x65, 0x9d, 0xb4, 0x71, 0x15, 0x5b, 0x7d, 0xda, 0x2d, 0x77, 0x71, 0xeb, 0xdc, 0xc0, - 0x3f, 0x87, 0xd8, 0xa5, 0xe8, 0x3d, 0x48, 0x83, 0x0b, 0xd2, 0xc4, 0xab, 0xc2, 0xa6, 0xb0, 0xbd, - 0xb0, 0xbb, 0xad, 0x72, 0x06, 0xa2, 0x29, 0xea, 0x17, 0x0f, 0x6f, 0x30, 0x9a, 0xf2, 0x02, 0x24, - 0x7f, 0x8d, 0x16, 0x21, 0x5f, 0xd5, 0x4a, 0xf5, 0x46, 0xb5, 0xa6, 0x6b, 0xa6, 0x29, 0xa7, 0xd0, - 0x1c, 0xe4, 0xea, 0xb5, 0xaf, 0x9a, 0xbf, 0x12, 0xd0, 0x3c, 0xcc, 0x1a, 0x5a, 0xa9, 0xc2, 0x0e, - 0x45, 0xe5, 0x8f, 0x00, 0x2b, 0x53, 0xf2, 0xee, 0x80, 0x38, 0x2e, 0x46, 0x1f, 0x40, 0x72, 0xa9, - 0x45, 0x43, 0x4b, 0x8f, 0x12, 0x2d, 0x31, 0x8e, 0x6a, 0x7a, 0x04, 0x83, 0xf1, 0x14, 0x03, 0x24, - 0x7f, 0x8d, 0xf2, 0x90, 0x65, 0x9e, 0x4e, 0xe4, 0x94, 0xe7, 
0xe0, 0x48, 0x0f, 0x97, 0x02, 0x9a, - 0x05, 0xa9, 0xe4, 0xf9, 0x93, 0x45, 0x94, 0x83, 0x99, 0x8a, 0x56, 0xaa, 0xc8, 0x69, 0x6f, 0xd3, - 0x73, 0x79, 0x22, 0xcf, 0x78, 0x70, 0xfd, 0xb0, 0xf1, 0x83, 0x2d, 0x25, 0x65, 0x0b, 0x16, 0xbd, - 0xd9, 0x35, 0xe7, 0x8c, 0x84, 0x57, 0xb7, 0x00, 0x62, 0xaf, 0xed, 0x9b, 0x9c, 0x35, 0xc4, 0x5e, - 0x5b, 0xf9, 0x0e, 0xf2, 0x18, 0x12, 0x64, 0x79, 0x0e, 0xe0, 0x90, 0x36, 0x2e, 0x13, 0xe7, 0xac, - 0xd7, 0xf1, 0xb1, 0xf9, 0xdd, 0x25, 0x95, 0x3d, 0x2b, 0x35, 0x7c, 0x56, 0x6a, 0xc9, 0xb9, 0x32, - 0x38, 0x1c, 0x5a, 0x0a, 0x6f, 0x40, 0xf4, 0xc5, 0x83, 0x58, 0xa7, 0x80, 0xca, 0xfd, 0xa1, 0x4b, - 0xf1, 0xc5, 0x3e, 0xe9, 0x39, 0x31, 0x2e, 0x26, 0x26, 0x8a, 0xb7, 0x9b, 0xa8, 0x3c, 0x84, 0x3b, - 0x81, 0x76, 0x1d, 0x5b, 0x97, 0x38, 0x2e, 0xa2, 0x36, 0x82, 0x5d, 0x4b, 0xa9, 0x42, 0xb6, 0xc5, - 0xb6, 0x13, 0x23, 0x86, 0x20, 0x65, 0x03, 0x60, 0x0f, 0xd3, 0x70, 0x88, 0x0c, 0xe9, 0x73, 0x7c, - 0x15, 0x4c, 0xf1, 0x7e, 0x2a, 0xaf, 0x21, 0xef, 0x9f, 0x07, 0xf2, 0x8f, 0x41, 0xba, 0xb4, 0xfa, - 0x43, 0x9c, 0x28, 0xce, 0x20, 0xca, 0x3e, 0x80, 0x99, 0x20, 0x3d, 0xd6, 0x12, 0x6f, 0xd6, 0xda, - 0x82, 0xf9, 0x0a, 0xee, 0x63, 0x8a, 0xe3, 0x9d, 0x6e, 0xc2, 0xdc, 0xb1, 0x45, 0x5b, 0xdd, 0x78, - 0xc4, 0x5f, 0x01, 0xe6, 0x03, 0x48, 0x10, 0xe7, 0x0d, 0x64, 0x5b, 0xc4, 0xb6, 0x2d, 0xa7, 0x1d, - 0xbc, 0xe1, 0x5b, 0xfc, 0x1b, 0x7e, 0x0d, 0xab, 0x96, 0x19, 0xd0, 0x08, 0x19, 0xe1, 0x00, 0x31, - 0x22, 0x51, 0xfa, 0xe6, 0x44, 0x3b, 0x90, 0x0d, 0x14, 0xbd, 0x6e, 0x1c, 0xe9, 0x07, 0xfa, 0xe1, - 0xb1, 0x2e, 0xa7, 0x50, 0x16, 0xd2, 0xa6, 0xd6, 0x90, 0x05, 0x04, 0x90, 0xa9, 0x68, 0x75, 0xad, - 0xa1, 0xc9, 0xe2, 0xee, 0xef, 0x0c, 0xc0, 0xe7, 0x91, 0x31, 0xf4, 0x8d, 0x35, 0x80, 0x6b, 0x1f, - 0x52, 0x6e, 0xfe, 0x5a, 0x14, 0xee, 0xdf, 0xa2, 0xbe, 0x4a, 0x0a, 0xed, 0x41, 0x2e, 0x2c, 0x0f, - 0x5a, 0x9f, 0xa4, 0x70, 0xad, 0x2b, 0xdc, 0x8d, 0x3e, 0xe4, 0x84, 0xf2, 0x5c, 0x4b, 0xd0, 0x06, - 0x0f, 0x9f, 0xae, 0x4f, 0x61, 0x79, 0xea, 0xba, 0x34, 0xef, 0xc3, 0xa9, 0xa4, 0x50, 0x0d, 0xe6, - 
0xf8, 0x4a, 0xa0, 0x7b, 0x11, 0x4a, 0x7c, 0x59, 0x12, 0xa4, 0xaa, 0x23, 0x4f, 0x7e, 0xbe, 0x18, - 0x60, 0x21, 0x6a, 0xc2, 0x44, 0xba, 0x83, 0x91, 0x29, 0xff, 0x3d, 0xf9, 0x0f, 0xa9, 0xa7, 0x02, - 0x7a, 0x05, 0xe9, 0x3d, 0x4c, 0xd1, 0x32, 0x8f, 0x1d, 0xf7, 0xb2, 0xb0, 0x32, 0xb5, 0x3f, 0xb2, - 0xf1, 0x12, 0xd2, 0xe6, 0x24, 0x73, 0x5c, 0xbb, 0x84, 0x9b, 0x78, 0x07, 0x19, 0x56, 0x29, 0xb4, - 0xc6, 0x73, 0xaf, 0xd5, 0x2c, 0x81, 0xfe, 0x11, 0x24, 0x96, 0x7b, 0x35, 0xa2, 0x32, 0x8c, 0xbc, - 0x16, 0x5b, 0x26, 0x3f, 0xf3, 0x5b, 0xc8, 0x99, 0x8e, 0x35, 0x70, 0xbb, 0x84, 0xc6, 0x5e, 0x5e, - 0xec, 0xfc, 0x4f, 0x4f, 0x4e, 0x77, 0x3a, 0x3d, 0xda, 0x1d, 0x36, 0xd5, 0x16, 0xb1, 0x8b, 0x36, - 0x71, 0x87, 0xe7, 0x56, 0xb1, 0xd9, 0xb7, 0x5c, 0x5a, 0x8c, 0xf8, 0x33, 0x6f, 0x66, 0xfc, 0xcd, - 0x67, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x53, 0x09, 0x0b, 0x91, 0xea, 0x07, 0x00, 0x00, + // 855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xff, 0x6f, 0xda, 0x46, + 0x14, 0xc7, 0x76, 0x0c, 0xe4, 0x39, 0x69, 0xad, 0xeb, 0x94, 0x26, 0xee, 0xd4, 0x25, 0xb7, 0x6e, + 0xca, 0x56, 0xd5, 0x54, 0x6c, 0xd3, 0xb2, 0xef, 0x63, 0xc1, 0x4a, 0xa0, 0x94, 0x56, 0x86, 0xae, + 0xea, 0x34, 0xa9, 0x3a, 0xf0, 0x15, 0x50, 0xb0, 0xcd, 0xf0, 0x11, 0x89, 0xbf, 0x61, 0x3f, 0x6c, + 0x7f, 0xc9, 0x7e, 0xdd, 0xbf, 0x37, 0x9d, 0xef, 0x6c, 0x1c, 0xb0, 0xc9, 0xa4, 0xfe, 0xe6, 0x7b, + 0xef, 0xf3, 0x3e, 0xf7, 0x79, 0x2f, 0xf7, 0x79, 0x01, 0x1e, 0xcd, 0xe6, 0x21, 0x0b, 0x07, 0x8b, + 0x77, 0x35, 0x9f, 0x04, 0x64, 0x44, 0x7d, 0x1a, 0xb0, 0xcc, 0xa7, 0x1d, 0xa7, 0x11, 0xac, 0x22, + 0xd6, 0xd1, 0x28, 0x0c, 0x47, 0x53, 0x5a, 0x4b, 0x0b, 0x49, 0xb0, 0x14, 0x30, 0xeb, 0xc1, 0x7a, + 0x8a, 0xfa, 0x33, 0x26, 0x93, 0xf8, 0x2f, 0x05, 0x0e, 0xba, 0xa1, 0x47, 0x2f, 0x29, 0x99, 0xb2, + 0xf1, 0xf9, 0x98, 0x0e, 0xaf, 0x5c, 0xfa, 0xc7, 0x82, 0x46, 0x0c, 0xfd, 0x08, 0xfa, 0x6c, 0x1e, + 0x0e, 0xe8, 0xa1, 0x72, 0xac, 0x9c, 0xde, 0xa9, 0x9f, 0xda, 0x19, 0x01, 0xf9, 0x25, 0xf6, 0x4b, 
+ 0x8e, 0x77, 0x45, 0x19, 0xfe, 0x0a, 0xf4, 0xf8, 0x8c, 0xee, 0x82, 0x71, 0xe9, 0x34, 0x3a, 0xfd, + 0xcb, 0x56, 0xd7, 0xe9, 0xf5, 0xcc, 0x12, 0xda, 0x83, 0x6a, 0xa7, 0xf5, 0xab, 0x13, 0x9f, 0x14, + 0xb4, 0x0f, 0xbb, 0xae, 0xd3, 0x68, 0x8a, 0xa4, 0x8a, 0xff, 0x51, 0xe0, 0xfe, 0x06, 0x7d, 0x34, + 0x0b, 0x83, 0x88, 0xa2, 0x9f, 0x40, 0x8f, 0x18, 0x61, 0x89, 0xa4, 0xcf, 0xb6, 0x4a, 0x12, 0x35, + 0x76, 0x8f, 0x17, 0xb8, 0xa2, 0x0e, 0xbb, 0xa0, 0xc7, 0x67, 0x64, 0x40, 0x45, 0x68, 0x7a, 0x63, + 0x96, 0xb8, 0x82, 0x57, 0xdd, 0xe4, 0xa8, 0xa0, 0x5d, 0xd0, 0x1b, 0x5c, 0x9f, 0xa9, 0xa2, 0x2a, + 0xec, 0x34, 0x9d, 0x46, 0xd3, 0xd4, 0x78, 0x90, 0xab, 0x7c, 0x63, 0xee, 0x70, 0x78, 0xf7, 0x45, + 0xff, 0xad, 0x38, 0xea, 0xf8, 0x25, 0x54, 0x9f, 0x53, 0x46, 0x3c, 0xc2, 0x08, 0x3a, 0x81, 0xbd, + 0xd1, 0x7c, 0x36, 0x7c, 0x4b, 0x3c, 0x6f, 0x4e, 0xa3, 0x28, 0xd6, 0xb9, 0xeb, 0x1a, 0x3c, 0xd6, + 0x10, 0x21, 0x0e, 0x19, 0x33, 0x36, 0x4b, 0x21, 0xaa, 0x80, 0xf0, 0x98, 0x84, 0xe0, 0x08, 0x76, + 0x78, 0x37, 0x1c, 0x3a, 0x98, 0x04, 0xde, 0x3a, 0x1b, 0x8f, 0x25, 0x6c, 0x07, 0x50, 0xe6, 0x9d, + 0x2d, 0x12, 0x1e, 0x79, 0x42, 0x4f, 0xa1, 0xea, 0x4b, 0x51, 0x87, 0xda, 0xb1, 0x72, 0x6a, 0xd4, + 0x3f, 0xc8, 0x0e, 0x2b, 0x11, 0xec, 0xa6, 0x28, 0xfc, 0xa7, 0x02, 0x95, 0xf3, 0xe9, 0x22, 0x62, + 0x74, 0x8e, 0xbe, 0x04, 0x3d, 0x08, 0x3d, 0xca, 0x6f, 0xd4, 0x4e, 0x8d, 0xfa, 0xc3, 0x6c, 0xa9, + 0xc4, 0xc4, 0xf3, 0x8e, 0x9c, 0x80, 0xcd, 0x97, 0xae, 0x00, 0x5b, 0x6d, 0x80, 0x55, 0x10, 0x99, + 0xa0, 0x5d, 0xd1, 0xa5, 0xd4, 0xcc, 0x3f, 0xd1, 0xa7, 0xa0, 0x5f, 0x93, 0xe9, 0x82, 0xc6, 0x52, + 0x8d, 0xba, 0xb9, 0xfe, 0xd7, 0x73, 0x45, 0xfa, 0x5b, 0xf5, 0x4c, 0xc1, 0x27, 0x70, 0x97, 0x87, + 0x5a, 0xc1, 0xbb, 0x30, 0x79, 0x8f, 0x77, 0x40, 0x9d, 0x78, 0x92, 0x4f, 0x9d, 0x78, 0xf8, 0x0c, + 0xcc, 0x15, 0x44, 0x3e, 0x90, 0x47, 0xb0, 0xc3, 0xb5, 0xc4, 0xa8, 0xbc, 0x1b, 0xe2, 0x2c, 0x6e, + 0x03, 0x92, 0x5d, 0xb4, 0xc3, 0x49, 0x50, 0xc0, 0x9f, 0x72, 0xa9, 0x5b, 0xb9, 0x3e, 0x81, 0x7b, + 0x92, 0xab, 0x43, 0xc9, 0x35, 0x2d, 
0x12, 0xdb, 0x4c, 0x61, 0x37, 0xf4, 0x3e, 0x81, 0xca, 0x50, + 0x84, 0xa5, 0xe4, 0x7b, 0x39, 0xa3, 0x76, 0x13, 0x0c, 0x7e, 0x08, 0x70, 0x41, 0x59, 0x72, 0xc7, + 0xc6, 0x84, 0xf1, 0x37, 0x60, 0xc4, 0x79, 0xc9, 0xfe, 0x79, 0x32, 0x70, 0x45, 0xbe, 0x00, 0xb1, + 0x09, 0xec, 0x64, 0x13, 0xd8, 0x8d, 0x60, 0x29, 0x87, 0x8e, 0xdb, 0x00, 0xbd, 0x2d, 0xd4, 0x2b, + 0x2e, 0xf5, 0x76, 0xae, 0x13, 0xd8, 0x6f, 0xd2, 0x29, 0x65, 0xb4, 0x58, 0xe9, 0x31, 0xec, 0xbd, + 0x26, 0x6c, 0x38, 0x2e, 0x46, 0xfc, 0xab, 0xc0, 0xbe, 0x84, 0xc8, 0x76, 0xbe, 0x83, 0xca, 0x30, + 0xf4, 0x7d, 0x12, 0x78, 0xd2, 0xff, 0x27, 0xd9, 0x61, 0xdd, 0xc0, 0xda, 0xe7, 0x02, 0xe8, 0x26, + 0x15, 0xc9, 0x05, 0x6a, 0x4e, 0x47, 0xda, 0xed, 0x1d, 0x3d, 0x86, 0x8a, 0x64, 0xe4, 0x9b, 0xe3, + 0x55, 0xf7, 0x59, 0xf7, 0xc5, 0xeb, 0xae, 0x59, 0x42, 0x15, 0xd0, 0x7a, 0x4e, 0xdf, 0x54, 0x10, + 0x40, 0xb9, 0xe9, 0x74, 0x9c, 0xbe, 0x63, 0xaa, 0xf5, 0xbf, 0xcb, 0x00, 0xcf, 0x53, 0x61, 0xe8, + 0x77, 0xf1, 0x94, 0x33, 0xbb, 0x09, 0xe1, 0xdb, 0x77, 0xa9, 0xf5, 0xf1, 0xff, 0x58, 0x6e, 0xb8, + 0x84, 0x2e, 0xa0, 0x9a, 0xb8, 0x00, 0x3d, 0x58, 0x2f, 0xc9, 0xd8, 0xc7, 0xfa, 0x30, 0x3f, 0x99, + 0x21, 0x32, 0x32, 0xa6, 0x40, 0x79, 0x9e, 0xcf, 0xb8, 0xc5, 0x3a, 0xd8, 0x18, 0x97, 0xc3, 0xff, + 0xad, 0xe0, 0x12, 0x6a, 0xc1, 0x5e, 0xd6, 0x11, 0xe8, 0xa3, 0x1c, 0xa6, 0xac, 0x57, 0xb6, 0x50, + 0x5d, 0xa6, 0x9a, 0xe2, 0xfe, 0x0a, 0x80, 0x56, 0xde, 0x0d, 0x6b, 0xdd, 0x3d, 0x4b, 0x45, 0xc5, + 0xef, 0xe4, 0x3d, 0xa8, 0x9e, 0x2a, 0xe8, 0x0c, 0xb4, 0x0b, 0xca, 0xd0, 0x41, 0x16, 0xbb, 0xf2, + 0xa5, 0x75, 0x7f, 0x23, 0x9e, 0xca, 0xf8, 0x1a, 0xb4, 0xde, 0x7a, 0xe5, 0xca, 0x76, 0x5b, 0x26, + 0xf1, 0x03, 0x94, 0x85, 0xa5, 0xd0, 0x51, 0xb6, 0xf6, 0x86, 0xcd, 0xb6, 0x94, 0xff, 0x0c, 0xba, + 0xe8, 0xfb, 0x30, 0xc7, 0x32, 0xa2, 0xf8, 0xa8, 0xd0, 0x4c, 0x71, 0xcf, 0xdf, 0x43, 0xb5, 0x17, + 0x90, 0x59, 0x34, 0x0e, 0x59, 0xe1, 0xf0, 0x0a, 0xef, 0xff, 0xe5, 0xc9, 0x6f, 0x8f, 0x47, 0x13, + 0x36, 0x5e, 0x0c, 0xec, 0x61, 0xe8, 0xd7, 0xfc, 0x30, 0x5a, 0x5c, 0x91, 
0xda, 0x60, 0x4a, 0x22, + 0x56, 0xcb, 0xf9, 0xa9, 0x33, 0x28, 0xc7, 0xc1, 0x2f, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x09, + 0x67, 0x31, 0xd3, 0x08, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index e3fc2cd..0e33f27 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -59,18 +59,32 @@ message NodeHealthCheckResponse { } // use for raft +message Metadata { + string grpc_address = 1; + string http_address = 2; +} + +message Node { + string bind_address = 1; + string status = 2; + Metadata metadata = 3; +} + +message Cluster { + map nodes = 1; +} + message NodeInfoRequest { string id = 1; } message NodeInfoResponse { - google.protobuf.Any nodeConfig = 1; - string state = 2; + Node node = 1; } message ClusterJoinRequest { string id = 1; - google.protobuf.Any nodeConfig = 2; + Node node = 2; } message ClusterLeaveRequest { @@ -78,7 +92,7 @@ message ClusterLeaveRequest { } message ClusterInfoResponse { - google.protobuf.Any cluster = 1; + Cluster cluster = 1; } message GetRequest { From 3f0601b9dcd31fdaab5b24dedb0b9860bb2306fd Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 30 Jul 2019 17:13:32 +0900 Subject: [PATCH 09/76] Update CHANGES.md --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 3731fd2..9ee2110 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- New CLI #82 - Split protobuf into components #84 - Change subcommands #85 +- Update protobuf #86 +- Change protobuf #87 ## [v0.7.1] - 2019-07-18 From 87fe732e37dd465d7fe40479f4adfd7b8a7046ef Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 30 Jul 2019 21:33:03 +0900 Subject: [PATCH 10/76] Refactoring (#89) --- cmd/blast/main.go | 64 +------- cmd/blast/manager_node_info.go | 8 +- dispatcher/grpc_service.go | 4 +- dispatcher/server_test.go | 216 +++++++++++++++------------ indexer/grpc_service.go | 4 +- manager/grpc_client.go | 8 +- manager/grpc_service.go | 113 ++++++-------- manager/raft_fsm_test.go | 28 ++-- manager/raft_server.go | 50 +++---- manager/server_test.go | 198 ++++++------------------ protobuf/management/management.pb.go | 187 +++++++++-------------- protobuf/management/management.proto | 8 +- 12 files changed, 338 insertions(+), 550 deletions(-) diff --git a/cmd/blast/main.go b/cmd/blast/main.go index 1a04817..1997d3f 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -77,7 +77,7 @@ func main() { }, cli.StringFlag{ Name: "data-dir", - Value: "/tmp/blast/indexer", + Value: "/tmp/blast/manager", EnvVar: "BLAST_MANAGER_DATA_DIR", Usage: "A data directory for the node to store state", }, @@ -215,19 +215,9 @@ func main() { Name: "info", Usage: "Get node information", Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "The node ID for which to retrieve the node information", - }, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -267,26 +257,6 @@ func main() { Name: "info", Usage: "Get cluster information", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the 
cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", Value: ":5100", @@ -299,26 +269,6 @@ func main() { Name: "watch", Usage: "Watch peers", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", Value: ":5100", @@ -352,7 +302,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, }, @@ -365,7 +315,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -383,7 +333,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, }, @@ -396,7 +346,7 @@ func main() { 
Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, }, @@ -409,7 +359,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5100", Usage: "The gRPC listen address", }, }, diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index c7a206f..55f0e1d 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -24,14 +24,8 @@ import ( ) func managerNodeInfo(c *cli.Context) error { - peerGrpcAddr := c.String("peer-grpc-address") - nodeId := c.String("node-id") grpcAddr := c.String("grpc-address") - if peerGrpcAddr != "" { - - } - client, err := manager.NewGRPCClient(grpcAddr) if err != nil { return err @@ -43,7 +37,7 @@ func managerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.NodeInfo(nodeId) + metadata, err := client.NodeInfo() if err != nil { return err } diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 39152ff..ca48e1f 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -99,7 +99,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { continue } - if node.Status == raft.Leader.String() || node.Status == raft.Follower.String() { + if node.State == raft.Leader.String() || node.State == raft.Follower.String() { var ok bool client, ok = s.managerClients[id] if ok { @@ -108,7 +108,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.Status)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State)) } } diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index c762b2d..ec4ff53 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -15,6 +15,7 @@ package dispatcher 
import ( + "fmt" "os" "path/filepath" "reflect" @@ -26,100 +27,131 @@ import ( "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" + "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" ) func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerPeerGrpcAddress1 := "" + managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerNodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerDataDir1 := testutils.TmpDir() + managerRaftStorageType1 := "boltdb" + + managerNode1 := &management.Node{ + BindAddress: managerBindAddress1, + State: "", + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress1, + HttpAddress: managerHttpAddress1, + }, + } + + managerIndexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // - // manager - // - // create cluster config - managerClusterConfig1 := config.DefaultClusterConfig() - // create node config - managerNodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig1.DataDir) - }() - // create manager - manager1, err := manager.NewServer(managerClusterConfig1, managerNodeConfig1, indexConfig, logger.Named("manager1"), grpcLogger.Named("manager1"), httpAccessLogger) + // 
create server + managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNodeId1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if manager1 != nil { - manager1.Stop() + if managerServer1 != nil { + managerServer1.Stop() } }() if err != nil { t.Fatalf("%v", err) } - // start manager - manager1.Start() - // sleep - time.Sleep(5 * time.Second) - // create cluster config - managerClusterConfig2 := config.DefaultClusterConfig() - managerClusterConfig2.PeerAddr = managerNodeConfig1.GRPCAddr - // create node config - managerNodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig2.DataDir) - }() - // create manager - manager2, err := manager.NewServer(managerClusterConfig2, managerNodeConfig2, indexConfig, logger.Named("manager2"), grpcLogger.Named("manager2"), httpAccessLogger) + // start server + managerServer1.Start() + + managerPeerGrpcAddress2 := managerGrpcAddress1 + managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerNodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerDataDir2 := testutils.TmpDir() + managerRaftStorageType2 := "boltdb" + + managerNode2 := &management.Node{ + BindAddress: managerBindAddress2, + State: "", + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress2, + HttpAddress: managerHttpAddress2, + }, + } + + managerIndexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNodeId2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if manager2 != nil 
{ - manager2.Stop() + if managerServer2 != nil { + managerServer2.Stop() } }() if err != nil { t.Fatalf("%v", err) } - // start manager - manager2.Start() - // sleep - time.Sleep(5 * time.Second) - // create cluster config - managerClusterConfig3 := config.DefaultClusterConfig() - managerClusterConfig3.PeerAddr = managerNodeConfig1.GRPCAddr - // create node config - managerNodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(managerNodeConfig3.DataDir) - }() - // create manager - manager3, err := manager.NewServer(managerClusterConfig3, managerNodeConfig3, indexConfig, logger.Named("manager3"), grpcLogger.Named("manager3"), httpAccessLogger) + // start server + managerServer2.Start() + + managerPeerGrpcAddress3 := managerGrpcAddress1 + managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerNodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerDataDir3 := testutils.TmpDir() + managerRaftStorageType3 := "boltdb" + + managerNode3 := &management.Node{ + BindAddress: managerBindAddress3, + State: "", + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress3, + HttpAddress: managerHttpAddress3, + }, + } + + managerIndexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + // create server + managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNodeId3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if manager3 != nil { - manager3.Stop() + if managerServer3 != nil { + managerServer3.Stop() } }() if err != nil { t.Fatalf("%v", err) } - // start manager - manager3.Start() + + // start server + managerServer3.Start() + // sleep time.Sleep(5 * time.Second) // gRPC 
client for manager1 - managerClient1, err := manager.NewGRPCClient(managerNodeConfig1.GRPCAddr) + managerClient1, err := manager.NewGRPCClient(managerNode1.Metadata.GrpcAddress) defer func() { _ = managerClient1.Close() }() @@ -131,47 +163,37 @@ func TestServer_Start(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - expManagerCluster1 := map[string]interface{}{ - managerNodeConfig1.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - managerNodeConfig2.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - managerNodeConfig3.NodeId: map[string]interface{}{ - "node_config": managerNodeConfig3.ToMap(), - "state": raft.Follower.String(), + expManagerCluster1 := &management.Cluster{ + Nodes: map[string]*management.Node{ + managerNodeId1: { + BindAddress: managerBindAddress1, + State: raft.Leader.String(), + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress1, + HttpAddress: managerHttpAddress1, + }, + }, + managerNodeId2: { + BindAddress: managerBindAddress2, + State: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress2, + HttpAddress: managerHttpAddress2, + }, + }, + managerNodeId3: { + BindAddress: managerBindAddress3, + State: raft.Follower.String(), + Metadata: &management.Metadata{ + GrpcAddress: managerGrpcAddress3, + HttpAddress: managerHttpAddress3, + }, + }, }, } actManagerCluster1 := managerCluster1 - expManagerNodeConfig1 := expManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig1 := actManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expManagerNodeConfig1, actManagerNodeConfig1) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig1, actManagerNodeConfig1) - } - actManagerState1 := 
actManagerCluster1[managerNodeConfig1.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState1 && raft.Follower.String() != actManagerState1 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actManagerState1) - } - expManagerNodeConfig2 := expManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig2 := actManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expManagerNodeConfig2, actManagerNodeConfig2) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig2, actManagerNodeConfig2) - } - actManagerState2 := actManagerCluster1[managerNodeConfig2.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState2 && raft.Follower.String() != actManagerState2 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actManagerState2) - } - expManagerNodeConfig3 := expManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actManagerNodeConfig3 := actManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expManagerNodeConfig3, actManagerNodeConfig3) { - t.Fatalf("expected content to see %v, saw %v", expManagerNodeConfig3, actManagerNodeConfig3) - } - actManagerState3 := actManagerCluster1[managerNodeConfig3.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actManagerState3 && raft.Follower.String() != actManagerState3 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actManagerState3) + if !reflect.DeepEqual(expManagerCluster1, actManagerCluster1) { + t.Fatalf("expected content to see %v, saw %v", expManagerCluster1, 
actManagerCluster1) } // @@ -179,7 +201,7 @@ func TestServer_Start(t *testing.T) { // // create cluster config indexerClusterConfig1 := config.DefaultClusterConfig() - indexerClusterConfig1.ManagerAddr = managerNodeConfig1.GRPCAddr + indexerClusterConfig1.ManagerAddr = managerGrpcAddress1 indexerClusterConfig1.ClusterId = "cluster1" // create node config indexerNodeConfig1 := testutils.TmpNodeConfig() @@ -299,7 +321,7 @@ func TestServer_Start(t *testing.T) { // // create cluster config indexerClusterConfig2 := config.DefaultClusterConfig() - indexerClusterConfig2.ManagerAddr = managerNodeConfig1.GRPCAddr + indexerClusterConfig2.ManagerAddr = managerGrpcAddress1 indexerClusterConfig2.ClusterId = "cluster2" // create node config indexerNodeConfig4 := testutils.TmpNodeConfig() @@ -419,7 +441,7 @@ func TestServer_Start(t *testing.T) { // // create cluster config dispatcherClusterConfig1 := config.DefaultClusterConfig() - dispatcherClusterConfig1.ManagerAddr = managerNodeConfig1.GRPCAddr + dispatcherClusterConfig1.ManagerAddr = managerGrpcAddress1 // create node config dispatcherNodeConfig := testutils.TmpNodeConfig() defer func() { diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 967572d..88ccf16 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -107,7 +107,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { continue } - if node.Status == raft.Leader.String() || node.Status == raft.Follower.String() { + if node.State == raft.Leader.String() || node.State == raft.Follower.String() { var ok bool client, ok = s.managerClients[id] if ok { @@ -116,7 +116,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.Status)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State)) } } diff --git 
a/manager/grpc_client.go b/manager/grpc_client.go index 7fedf16..eff4dd6 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -109,12 +109,8 @@ func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (str return resp.State.String(), nil } -func (c *GRPCClient) NodeInfo(id string, opts ...grpc.CallOption) (*management.Node, error) { - req := &management.NodeInfoRequest{ - Id: id, - } - - resp, err := c.client.NodeInfo(c.ctx, req, opts...) +func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*management.Node, error) { + resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 67e5d66..e02c2c4 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -82,8 +82,8 @@ func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { var client *GRPCClient for id, node := range s.cluster.Nodes { - state := node.Status - if node.Status == "" { + state := node.State + if node.State == "" { s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) continue } @@ -140,111 +140,89 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Info("received a request to stop updating a cluster") return case <-ticker.C: - cluster, err := s.getCluster() + s.cluster, err = s.getCluster() if err != nil { s.logger.Error(err.Error()) return } // create latest cluster hash - newClusterHash, err := hashutils.Hash(cluster) + newClusterHash, err := hashutils.Hash(s.cluster) if err != nil { s.logger.Error(err.Error()) return } // create peer node list with out self node - peers := &management.Cluster{Nodes: make(map[string]*management.Node, 0)} - for nodeId, node := range cluster.Nodes { - if nodeId != s.NodeID() { - peers.Nodes[nodeId] = node + for id, node := range s.cluster.Nodes { + if id != s.NodeID() { + s.peers.Nodes[id] = node } } // create latest peers hash - 
newPeersHash, err := hashutils.Hash(peers) + newPeersHash, err := hashutils.Hash(s.peers) if err != nil { s.logger.Error(err.Error()) return } // compare peers hash - //if !reflect.DeepEqual(s.peers, peers) { if !cmp.Equal(peersHash, newPeersHash) { // open clients - for nodeId, nodeInfo := range peers.Nodes { - if nodeInfo.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("metadata", nodeInfo.Metadata)) - continue - } - if nodeInfo.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) + for id, node := range s.peers.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - client, exist := s.peerClients[nodeId] + client, exist := s.peerClients[id] if exist { - s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - - if client.GetAddress() != nodeInfo.Metadata.GrpcAddress { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) - delete(s.peerClients, nodeId) + if client.GetAddress() != node.Metadata.GrpcAddress { + s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + delete(s.peerClients, id) err = client.Close() if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) + s.logger.Warn(err.Error(), zap.String("id", id)) } - newClient, err := NewGRPCClient(nodeInfo.Metadata.GrpcAddress) + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), 
zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } - if newClient != nil { - s.peerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) + s.peerClients[id] = newClient } } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) - peerClient, err := NewGRPCClient(nodeInfo.Metadata.GrpcAddress) + s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeInfo.Metadata.GrpcAddress)) - } - if peerClient != nil { - s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) - s.peerClients[nodeId] = peerClient + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } + s.peerClients[id] = newClient } } - // close nonexistent clients - for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers.Nodes[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) + // close client for non-existent node + for id, client := range s.peerClients { + if _, exist := s.peers.Nodes[id]; !exist { + s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) err = 
client.Close() if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.peerClients, nodeId) + delete(s.peerClients, id) } } - // keep current peer nodes - s.logger.Debug("current peers", zap.Any("peers", peers)) - s.peers = peers - } else { - s.logger.Debug("there is no change in peers", zap.Any("peers", peers)) + // update peers hash + peersHash = newPeersHash } // compare cluster hash if !cmp.Equal(clusterHash, newClusterHash) { clusterResp := &management.ClusterInfoResponse{ - Cluster: cluster, + Cluster: s.cluster, } // output to channel @@ -252,12 +230,8 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { c <- *clusterResp } - // keep current cluster - s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - // TODO: overwrite cluster hash + // update cluster hash clusterHash = newClusterHash - } else { - s.logger.Debug("there is no change in cluster", zap.Any("cluster", cluster)) } default: time.Sleep(100 * time.Millisecond) @@ -306,7 +280,7 @@ func (s *GRPCService) NodeID() string { func (s *GRPCService) getSelfNode() (*management.Node, error) { node := s.raftServer.node - node.Status = s.raftServer.State().String() + node.State = s.raftServer.State().String() return node, nil } @@ -316,20 +290,19 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { var err error if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = peerClient.NodeInfo(id) + nodeInfo, err = peerClient.NodeInfo() if err != nil { - s.logger.Warn(err.Error()) + s.logger.Debug(err.Error()) nodeInfo = &management.Node{ BindAddress: "", - Status: raft.Shutdown.String(), + State: raft.Shutdown.String(), Metadata: &management.Metadata{}, } } } else { - s.logger.Warn("node does not exist in 
peer list", zap.String("id", id)) nodeInfo = &management.Node{ BindAddress: "", - Status: raft.Shutdown.String(), + State: raft.Shutdown.String(), Metadata: &management.Metadata{}, } } @@ -355,10 +328,10 @@ func (s *GRPCService) getNode(id string) (*management.Node, error) { return nodeInfo, nil } -func (s *GRPCService) NodeInfo(ctx context.Context, req *management.NodeInfoRequest) (*management.NodeInfoResponse, error) { +func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*management.NodeInfoResponse, error) { resp := &management.NodeInfoResponse{} - nodeInfo, err := s.getNode(req.Id) + nodeInfo, err := s.getNode(s.NodeID()) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -455,7 +428,7 @@ func (s *GRPCService) getCluster() (*management.Cluster, error) { s.logger.Warn(err.Error()) continue } - cluster.Nodes[nodeId].Status = node.Status + cluster.Nodes[nodeId].State = node.State } return cluster, nil diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index 1b6a243..983ff68 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -58,7 +58,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -69,7 +69,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -80,7 +80,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -95,7 +95,7 @@ func TestRaftFSM_GetNode(t *testing.T) { exp1 := &management.Node{ BindAddress: "2110", - Status: 
raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -142,7 +142,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -153,7 +153,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -164,7 +164,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -178,7 +178,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } exp1 := &management.Node{ BindAddress: "2110", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -193,7 +193,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - Status: raft.Shutdown.String(), + State: raft.Shutdown.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -207,7 +207,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } exp2 := &management.Node{ BindAddress: "2110", - Status: raft.Shutdown.String(), + State: raft.Shutdown.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -253,7 +253,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -264,7 +264,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - Status: 
raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -275,7 +275,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -289,7 +289,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { } exp1 := &management.Node{ BindAddress: "2110", - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", diff --git a/manager/raft_server.go b/manager/raft_server.go index b998de7..e1eee46 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -208,7 +208,7 @@ func (s *RaftServer) Start() error { // set node config s.logger.Info("register its own node config", zap.String("node_id", s.nodeId), zap.Any("node", s.node)) - err = s.setNodeConfig(s.nodeId, s.node) + err = s.setNode(s.nodeId, s.node) if err != nil { s.logger.Fatal(err.Error()) return err @@ -317,17 +317,17 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { return nil } -func (s *RaftServer) getNodeConfig(nodeId string) (*management.Node, error) { +func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { nodeConfig, err := s.fsm.GetNodeConfig(nodeId) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", nodeId)) return nil, err } return nodeConfig, nil } -func (s *RaftServer) setNodeConfig(nodeId string, node *management.Node) error { +func (s *RaftServer) setNode(nodeId string, node *management.Node) error { msg, err := newMessage( setNode, map[string]interface{}{ @@ -336,32 +336,32 @@ func (s *RaftServer) setNodeConfig(nodeId string, node *management.Node) error { }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) 
return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) return err } return nil } -func (s *RaftServer) deleteNodeConfig(nodeId string) error { +func (s *RaftServer) deleteNode(nodeId string) error { msg, err := newMessage( deleteNode, map[string]interface{}{ @@ -369,25 +369,25 @@ func (s *RaftServer) deleteNodeConfig(nodeId string) error { }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } @@ -405,9 +405,9 @@ func (s *RaftServer) GetNode(id string) (*management.Node, error) { var node *management.Node for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - node, err = s.getNodeConfig(id) + node, err = s.getNode(id) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", id)) return nil, err } break @@ -444,18 +444,18 @@ func (s *RaftServer) SetNode(nodeId string, node *management.Node) error { } // add node to Raft cluster - s.logger.Info("add voter", zap.String("nodeId", 
nodeId), zap.Any("node", node)) + s.logger.Info("join the node to the raft cluster", zap.String("id", nodeId), zap.Any("bind_address", node.BindAddress)) f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(node.BindAddress), 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("bind_address", node.BindAddress)) return err } // set node config - err = s.setNodeConfig(nodeId, node) + err = s.setNode(nodeId, node) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) return err } @@ -471,27 +471,27 @@ func (s *RaftServer) DeleteNode(nodeId string) error { cf := s.raft.GetConfiguration() err := cf.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } // delete node from Raft cluster for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(nodeId) { - s.logger.Debug("remove server", zap.String("node_id", nodeId)) + s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) f := s.raft.RemoveServer(server.ID, 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", string(server.ID))) return err } } } // delete node config - err = s.deleteNodeConfig(nodeId) + err = s.deleteNode(nodeId) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } @@ -510,7 +510,7 @@ func (s *RaftServer) GetCluster() (*management.Cluster, error) { for _, server := range cf.Configuration().Servers { node, err := s.GetNode(string(server.ID)) if err != nil { - s.logger.Warn(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) continue } diff --git a/manager/server_test.go b/manager/server_test.go index e5dae37..e389942 100644 --- a/manager/server_test.go +++ 
b/manager/server_test.go @@ -47,7 +47,7 @@ func TestServer_Start(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -94,7 +94,7 @@ func TestServer_HealthCheck(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -188,7 +188,7 @@ func TestServer_GetNode(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -232,13 +232,13 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo(nodeId) + nodeInfo, err := client.NodeInfo() if err != nil { t.Fatalf("%v", err) } expNodeInfo := &management.Node{ BindAddress: bindAddress, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -267,7 +267,7 @@ func TestServer_GetCluster(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -319,7 +319,7 @@ func TestServer_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId: { BindAddress: bindAddress, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -350,7 +350,7 @@ func TestServer_SetState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -431,7 +431,7 @@ func TestServer_GetState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ 
GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -512,7 +512,7 @@ func TestServer_DeleteState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -614,7 +614,7 @@ func TestCluster_Start(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -650,7 +650,7 @@ func TestCluster_Start(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -686,7 +686,7 @@ func TestCluster_Start(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -733,7 +733,7 @@ func TestCluster_HealthCheck(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -769,7 +769,7 @@ func TestCluster_HealthCheck(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -805,7 +805,7 @@ func TestCluster_HealthCheck(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -975,7 +975,7 @@ func TestCluster_GetNode(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1011,7 +1011,7 @@ func TestCluster_GetNode(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - 
Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1047,7 +1047,7 @@ func TestCluster_GetNode(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1100,13 +1100,13 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo(nodeId1) + node11, err := client1.NodeInfo() if err != nil { t.Fatalf("%v", err) } expNode11 := &management.Node{ BindAddress: bindAddress1, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1117,141 +1117,39 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node12, err := client1.NodeInfo(nodeId2) + node21, err := client2.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode12 := &management.Node{ + expNode21 := &management.Node{ BindAddress: bindAddress2, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, }, } - actNode12 := node12 - if !reflect.DeepEqual(expNode12, actNode12) { - t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) - } - - node13, err := client1.NodeInfo(nodeId3) - if err != nil { - t.Fatalf("%v", err) - } - expNode13 := &management.Node{ - BindAddress: bindAddress3, - Status: raft.Follower.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, - }, - } - actNode13 := node13 - if !reflect.DeepEqual(expNode13, actNode13) { - t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) - } - - node21, err := client2.NodeInfo(nodeId1) - if err != nil { - t.Fatalf("%v", err) - } - expNode21 := &management.Node{ - BindAddress: bindAddress1, 
- Status: raft.Leader.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, - }, - } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node22, err := client2.NodeInfo(nodeId2) - if err != nil { - t.Fatalf("%v", err) - } - expNode22 := &management.Node{ - BindAddress: bindAddress2, - Status: raft.Follower.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, - }, - } - actNode22 := node22 - if !reflect.DeepEqual(expNode22, actNode22) { - t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) - } - - node23, err := client2.NodeInfo(nodeId3) + node31, err := client3.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode23 := &management.Node{ + expNode31 := &management.Node{ BindAddress: bindAddress3, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, }, } - actNode23 := node23 - if !reflect.DeepEqual(expNode23, actNode23) { - t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) - } - - node31, err := client3.NodeInfo(nodeId1) - if err != nil { - t.Fatalf("%v", err) - } - expNode31 := &management.Node{ - BindAddress: bindAddress1, - Status: raft.Leader.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, - }, - } actNode31 := node31 if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } - - node32, err := client3.NodeInfo(nodeId2) - if err != nil { - t.Fatalf("%v", err) - } - expNode32 := &management.Node{ - BindAddress: bindAddress2, - Status: raft.Follower.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, - }, - } - actNode32 := node32 - if !reflect.DeepEqual(expNode32, actNode32) { - 
t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) - } - - node33, err := client3.NodeInfo(nodeId3) - if err != nil { - t.Fatalf("%v", err) - } - expNode33 := &management.Node{ - BindAddress: bindAddress3, - Status: raft.Follower.String(), - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, - }, - } - actNode33 := node33 - if !reflect.DeepEqual(expNode33, actNode33) { - t.Fatalf("expected content to see %v, saw %v", expNode33, actNode33) - } } func TestCluster_GetCluster(t *testing.T) { @@ -1271,7 +1169,7 @@ func TestCluster_GetCluster(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1307,7 +1205,7 @@ func TestCluster_GetCluster(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1343,7 +1241,7 @@ func TestCluster_GetCluster(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1404,7 +1302,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1412,7 +1310,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1420,7 +1318,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: 
&management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1441,7 +1339,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1449,7 +1347,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1457,7 +1355,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1478,7 +1376,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - Status: raft.Leader.String(), + State: raft.Leader.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1486,7 +1384,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1494,7 +1392,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - Status: raft.Follower.String(), + State: raft.Follower.String(), Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1525,7 +1423,7 @@ func TestCluster_SetState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1561,7 +1459,7 @@ func TestCluster_SetState(t *testing.T) { node2 := 
&management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1597,7 +1495,7 @@ func TestCluster_SetState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1772,7 +1670,7 @@ func TestCluster_GetState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1808,7 +1706,7 @@ func TestCluster_GetState(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1844,7 +1742,7 @@ func TestCluster_GetState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -2019,7 +1917,7 @@ func TestCluster_DeleteState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -2055,7 +1953,7 @@ func TestCluster_DeleteState(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -2091,7 +1989,7 @@ func TestCluster_DeleteState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - Status: "", + State: "", Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 8125b30..8b8235c 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -114,7 
+114,7 @@ func (x WatchResponse_Command) String() string { } func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15, 0} + return fileDescriptor_5e030ad796566078, []int{14, 0} } type NodeHealthCheckRequest struct { @@ -245,7 +245,7 @@ func (m *Metadata) GetHttpAddress() string { type Node struct { BindAddress string `protobuf:"bytes,1,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` Metadata *Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -284,9 +284,9 @@ func (m *Node) GetBindAddress() string { return "" } -func (m *Node) GetStatus() string { +func (m *Node) GetState() string { if m != nil { - return m.Status + return m.State } return "" } @@ -337,45 +337,6 @@ func (m *Cluster) GetNodes() map[string]*Node { return nil } -type NodeInfoRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} } -func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) } -func (*NodeInfoRequest) ProtoMessage() {} -func (*NodeInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{5} -} - -func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoRequest.Unmarshal(m, b) -} -func (m *NodeInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoRequest.Marshal(b, m, deterministic) -} -func (m *NodeInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeInfoRequest.Merge(m, 
src) -} -func (m *NodeInfoRequest) XXX_Size() int { - return xxx_messageInfo_NodeInfoRequest.Size(m) -} -func (m *NodeInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeInfoRequest proto.InternalMessageInfo - -func (m *NodeInfoRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - type NodeInfoResponse struct { Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -387,7 +348,7 @@ func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } func (*NodeInfoResponse) ProtoMessage() {} func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{6} + return fileDescriptor_5e030ad796566078, []int{5} } func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { @@ -427,7 +388,7 @@ func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } func (*ClusterJoinRequest) ProtoMessage() {} func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{7} + return fileDescriptor_5e030ad796566078, []int{6} } func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { @@ -473,7 +434,7 @@ func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } func (*ClusterLeaveRequest) ProtoMessage() {} func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{8} + return fileDescriptor_5e030ad796566078, []int{7} } func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { @@ -512,7 +473,7 @@ func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } 
func (*ClusterInfoResponse) ProtoMessage() {} func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9} + return fileDescriptor_5e030ad796566078, []int{8} } func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { @@ -551,7 +512,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} + return fileDescriptor_5e030ad796566078, []int{9} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { @@ -590,7 +551,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} + return fileDescriptor_5e030ad796566078, []int{10} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { @@ -630,7 +591,7 @@ func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} + return fileDescriptor_5e030ad796566078, []int{11} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { @@ -676,7 +637,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} + return fileDescriptor_5e030ad796566078, []int{12} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -715,7 +676,7 @@ func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (m *WatchRequest) String() string { return 
proto.CompactTextString(m) } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} + return fileDescriptor_5e030ad796566078, []int{13} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { @@ -756,7 +717,7 @@ func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15} + return fileDescriptor_5e030ad796566078, []int{14} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { @@ -808,7 +769,6 @@ func init() { proto.RegisterType((*Node)(nil), "management.Node") proto.RegisterType((*Cluster)(nil), "management.Cluster") proto.RegisterMapType((map[string]*Node)(nil), "management.Cluster.NodesEntry") - proto.RegisterType((*NodeInfoRequest)(nil), "management.NodeInfoRequest") proto.RegisterType((*NodeInfoResponse)(nil), "management.NodeInfoResponse") proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") @@ -826,61 +786,60 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 855 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xff, 0x6f, 0xda, 0x46, - 0x14, 0xc7, 0x76, 0x0c, 0xe4, 0x39, 0x69, 0xad, 0xeb, 0x94, 0x26, 0xee, 0xd4, 0x25, 0xb7, 0x6e, - 0xca, 0x56, 0xd5, 0x54, 0x6c, 0xd3, 0xb2, 0xef, 0x63, 0xc1, 0x4a, 0xa0, 0x94, 0x56, 0x86, 0xae, - 0xea, 0x34, 0xa9, 0x3a, 0xf0, 0x15, 0x50, 0xb0, 0xcd, 0xf0, 0x11, 0x89, 0xbf, 0x61, 0x3f, 0x6c, - 0x7f, 0xc9, 0x7e, 0xdd, 0xbf, 0x37, 0x9d, 0xef, 0x6c, 0x1c, 0xb0, 0xc9, 0xa4, 0xfe, 0xe6, 0x7b, - 0xef, 0xf3, 0x3e, 0xf7, 0x79, 0x2f, 0xf7, 0x79, 0x01, 0x1e, 0xcd, 0xe6, 0x21, 0x0b, 0x07, 0x8b, - 0x77, 0x35, 0x9f, 0x04, 0x64, 0x44, 0x7d, 0x1a, 
0xb0, 0xcc, 0xa7, 0x1d, 0xa7, 0x11, 0xac, 0x22, - 0xd6, 0xd1, 0x28, 0x0c, 0x47, 0x53, 0x5a, 0x4b, 0x0b, 0x49, 0xb0, 0x14, 0x30, 0xeb, 0xc1, 0x7a, - 0x8a, 0xfa, 0x33, 0x26, 0x93, 0xf8, 0x2f, 0x05, 0x0e, 0xba, 0xa1, 0x47, 0x2f, 0x29, 0x99, 0xb2, - 0xf1, 0xf9, 0x98, 0x0e, 0xaf, 0x5c, 0xfa, 0xc7, 0x82, 0x46, 0x0c, 0xfd, 0x08, 0xfa, 0x6c, 0x1e, - 0x0e, 0xe8, 0xa1, 0x72, 0xac, 0x9c, 0xde, 0xa9, 0x9f, 0xda, 0x19, 0x01, 0xf9, 0x25, 0xf6, 0x4b, - 0x8e, 0x77, 0x45, 0x19, 0xfe, 0x0a, 0xf4, 0xf8, 0x8c, 0xee, 0x82, 0x71, 0xe9, 0x34, 0x3a, 0xfd, - 0xcb, 0x56, 0xd7, 0xe9, 0xf5, 0xcc, 0x12, 0xda, 0x83, 0x6a, 0xa7, 0xf5, 0xab, 0x13, 0x9f, 0x14, - 0xb4, 0x0f, 0xbb, 0xae, 0xd3, 0x68, 0x8a, 0xa4, 0x8a, 0xff, 0x51, 0xe0, 0xfe, 0x06, 0x7d, 0x34, - 0x0b, 0x83, 0x88, 0xa2, 0x9f, 0x40, 0x8f, 0x18, 0x61, 0x89, 0xa4, 0xcf, 0xb6, 0x4a, 0x12, 0x35, - 0x76, 0x8f, 0x17, 0xb8, 0xa2, 0x0e, 0xbb, 0xa0, 0xc7, 0x67, 0x64, 0x40, 0x45, 0x68, 0x7a, 0x63, - 0x96, 0xb8, 0x82, 0x57, 0xdd, 0xe4, 0xa8, 0xa0, 0x5d, 0xd0, 0x1b, 0x5c, 0x9f, 0xa9, 0xa2, 0x2a, - 0xec, 0x34, 0x9d, 0x46, 0xd3, 0xd4, 0x78, 0x90, 0xab, 0x7c, 0x63, 0xee, 0x70, 0x78, 0xf7, 0x45, - 0xff, 0xad, 0x38, 0xea, 0xf8, 0x25, 0x54, 0x9f, 0x53, 0x46, 0x3c, 0xc2, 0x08, 0x3a, 0x81, 0xbd, - 0xd1, 0x7c, 0x36, 0x7c, 0x4b, 0x3c, 0x6f, 0x4e, 0xa3, 0x28, 0xd6, 0xb9, 0xeb, 0x1a, 0x3c, 0xd6, - 0x10, 0x21, 0x0e, 0x19, 0x33, 0x36, 0x4b, 0x21, 0xaa, 0x80, 0xf0, 0x98, 0x84, 0xe0, 0x08, 0x76, - 0x78, 0x37, 0x1c, 0x3a, 0x98, 0x04, 0xde, 0x3a, 0x1b, 0x8f, 0x25, 0x6c, 0x07, 0x50, 0xe6, 0x9d, - 0x2d, 0x12, 0x1e, 0x79, 0x42, 0x4f, 0xa1, 0xea, 0x4b, 0x51, 0x87, 0xda, 0xb1, 0x72, 0x6a, 0xd4, - 0x3f, 0xc8, 0x0e, 0x2b, 0x11, 0xec, 0xa6, 0x28, 0xfc, 0xa7, 0x02, 0x95, 0xf3, 0xe9, 0x22, 0x62, - 0x74, 0x8e, 0xbe, 0x04, 0x3d, 0x08, 0x3d, 0xca, 0x6f, 0xd4, 0x4e, 0x8d, 0xfa, 0xc3, 0x6c, 0xa9, - 0xc4, 0xc4, 0xf3, 0x8e, 0x9c, 0x80, 0xcd, 0x97, 0xae, 0x00, 0x5b, 0x6d, 0x80, 0x55, 0x10, 0x99, - 0xa0, 0x5d, 0xd1, 0xa5, 0xd4, 0xcc, 0x3f, 0xd1, 0xa7, 0xa0, 0x5f, 0x93, 0xe9, 0x82, 
0xc6, 0x52, - 0x8d, 0xba, 0xb9, 0xfe, 0xd7, 0x73, 0x45, 0xfa, 0x5b, 0xf5, 0x4c, 0xc1, 0x27, 0x70, 0x97, 0x87, - 0x5a, 0xc1, 0xbb, 0x30, 0x79, 0x8f, 0x77, 0x40, 0x9d, 0x78, 0x92, 0x4f, 0x9d, 0x78, 0xf8, 0x0c, - 0xcc, 0x15, 0x44, 0x3e, 0x90, 0x47, 0xb0, 0xc3, 0xb5, 0xc4, 0xa8, 0xbc, 0x1b, 0xe2, 0x2c, 0x6e, - 0x03, 0x92, 0x5d, 0xb4, 0xc3, 0x49, 0x50, 0xc0, 0x9f, 0x72, 0xa9, 0x5b, 0xb9, 0x3e, 0x81, 0x7b, - 0x92, 0xab, 0x43, 0xc9, 0x35, 0x2d, 0x12, 0xdb, 0x4c, 0x61, 0x37, 0xf4, 0x3e, 0x81, 0xca, 0x50, - 0x84, 0xa5, 0xe4, 0x7b, 0x39, 0xa3, 0x76, 0x13, 0x0c, 0x7e, 0x08, 0x70, 0x41, 0x59, 0x72, 0xc7, - 0xc6, 0x84, 0xf1, 0x37, 0x60, 0xc4, 0x79, 0xc9, 0xfe, 0x79, 0x32, 0x70, 0x45, 0xbe, 0x00, 0xb1, - 0x09, 0xec, 0x64, 0x13, 0xd8, 0x8d, 0x60, 0x29, 0x87, 0x8e, 0xdb, 0x00, 0xbd, 0x2d, 0xd4, 0x2b, - 0x2e, 0xf5, 0x76, 0xae, 0x13, 0xd8, 0x6f, 0xd2, 0x29, 0x65, 0xb4, 0x58, 0xe9, 0x31, 0xec, 0xbd, - 0x26, 0x6c, 0x38, 0x2e, 0x46, 0xfc, 0xab, 0xc0, 0xbe, 0x84, 0xc8, 0x76, 0xbe, 0x83, 0xca, 0x30, - 0xf4, 0x7d, 0x12, 0x78, 0xd2, 0xff, 0x27, 0xd9, 0x61, 0xdd, 0xc0, 0xda, 0xe7, 0x02, 0xe8, 0x26, - 0x15, 0xc9, 0x05, 0x6a, 0x4e, 0x47, 0xda, 0xed, 0x1d, 0x3d, 0x86, 0x8a, 0x64, 0xe4, 0x9b, 0xe3, - 0x55, 0xf7, 0x59, 0xf7, 0xc5, 0xeb, 0xae, 0x59, 0x42, 0x15, 0xd0, 0x7a, 0x4e, 0xdf, 0x54, 0x10, - 0x40, 0xb9, 0xe9, 0x74, 0x9c, 0xbe, 0x63, 0xaa, 0xf5, 0xbf, 0xcb, 0x00, 0xcf, 0x53, 0x61, 0xe8, - 0x77, 0xf1, 0x94, 0x33, 0xbb, 0x09, 0xe1, 0xdb, 0x77, 0xa9, 0xf5, 0xf1, 0xff, 0x58, 0x6e, 0xb8, - 0x84, 0x2e, 0xa0, 0x9a, 0xb8, 0x00, 0x3d, 0x58, 0x2f, 0xc9, 0xd8, 0xc7, 0xfa, 0x30, 0x3f, 0x99, - 0x21, 0x32, 0x32, 0xa6, 0x40, 0x79, 0x9e, 0xcf, 0xb8, 0xc5, 0x3a, 0xd8, 0x18, 0x97, 0xc3, 0xff, - 0xad, 0xe0, 0x12, 0x6a, 0xc1, 0x5e, 0xd6, 0x11, 0xe8, 0xa3, 0x1c, 0xa6, 0xac, 0x57, 0xb6, 0x50, - 0x5d, 0xa6, 0x9a, 0xe2, 0xfe, 0x0a, 0x80, 0x56, 0xde, 0x0d, 0x6b, 0xdd, 0x3d, 0x4b, 0x45, 0xc5, - 0xef, 0xe4, 0x3d, 0xa8, 0x9e, 0x2a, 0xe8, 0x0c, 0xb4, 0x0b, 0xca, 0xd0, 0x41, 0x16, 0xbb, 0xf2, - 0xa5, 0x75, 0x7f, 0x23, 
0x9e, 0xca, 0xf8, 0x1a, 0xb4, 0xde, 0x7a, 0xe5, 0xca, 0x76, 0x5b, 0x26, - 0xf1, 0x03, 0x94, 0x85, 0xa5, 0xd0, 0x51, 0xb6, 0xf6, 0x86, 0xcd, 0xb6, 0x94, 0xff, 0x0c, 0xba, - 0xe8, 0xfb, 0x30, 0xc7, 0x32, 0xa2, 0xf8, 0xa8, 0xd0, 0x4c, 0x71, 0xcf, 0xdf, 0x43, 0xb5, 0x17, - 0x90, 0x59, 0x34, 0x0e, 0x59, 0xe1, 0xf0, 0x0a, 0xef, 0xff, 0xe5, 0xc9, 0x6f, 0x8f, 0x47, 0x13, - 0x36, 0x5e, 0x0c, 0xec, 0x61, 0xe8, 0xd7, 0xfc, 0x30, 0x5a, 0x5c, 0x91, 0xda, 0x60, 0x4a, 0x22, - 0x56, 0xcb, 0xf9, 0xa9, 0x33, 0x28, 0xc7, 0xc1, 0x2f, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x09, - 0x67, 0x31, 0xd3, 0x08, 0x09, 0x00, 0x00, + // 841 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x7f, 0x8f, 0xda, 0x46, + 0x10, 0xc5, 0x36, 0x06, 0x6e, 0xcc, 0x25, 0xd6, 0x26, 0xba, 0xdc, 0xd1, 0x2a, 0xbd, 0xdb, 0xa6, + 0x15, 0x6d, 0x14, 0x13, 0xd1, 0x56, 0xbd, 0xfe, 0x2e, 0x39, 0xac, 0x03, 0x42, 0x48, 0x64, 0x48, + 0xa3, 0x54, 0x95, 0xa2, 0x05, 0x6f, 0x00, 0x1d, 0xb6, 0x09, 0x5e, 0x22, 0xf1, 0x19, 0xf2, 0x47, + 0xbf, 0x49, 0xff, 0xed, 0xd7, 0xab, 0xd6, 0xbb, 0x36, 0x3e, 0x30, 0x5c, 0xa5, 0xfc, 0xc7, 0xce, + 0xbc, 0x79, 0xf3, 0x66, 0xbc, 0x6f, 0x05, 0x3c, 0x98, 0x2f, 0x02, 0x16, 0x0c, 0x97, 0x6f, 0x6b, + 0x1e, 0xf1, 0xc9, 0x98, 0x7a, 0xd4, 0x67, 0xa9, 0x9f, 0x56, 0x94, 0x46, 0xb0, 0x8e, 0x54, 0x4e, + 0xc6, 0x41, 0x30, 0x9e, 0xd1, 0x5a, 0x52, 0x48, 0xfc, 0x95, 0x80, 0x55, 0x3e, 0xd9, 0x4c, 0x51, + 0x6f, 0xce, 0x64, 0x12, 0xff, 0xad, 0xc0, 0x51, 0x2f, 0x70, 0x69, 0x8b, 0x92, 0x19, 0x9b, 0x5c, + 0x4c, 0xe8, 0xe8, 0xca, 0xa1, 0xef, 0x96, 0x34, 0x64, 0xe8, 0x57, 0xd0, 0xe7, 0x8b, 0x60, 0x48, + 0x8f, 0x95, 0x53, 0xa5, 0x7a, 0xab, 0x5e, 0xb5, 0x52, 0x02, 0xb2, 0x4b, 0xac, 0x17, 0x1c, 0xef, + 0x88, 0x32, 0xfc, 0x1d, 0xe8, 0xd1, 0x19, 0xdd, 0x06, 0xa3, 0x65, 0x37, 0xba, 0x83, 0x56, 0xbb, + 0x67, 0xf7, 0xfb, 0x66, 0x0e, 0x95, 0xa1, 0xd4, 0x6d, 0xff, 0x61, 0x47, 0x27, 0x05, 0x1d, 0xc2, + 0x81, 0x63, 0x37, 0x9a, 0x22, 0xa9, 0xe2, 0x7f, 0x14, 0xb8, 0xb7, 
0x45, 0x1f, 0xce, 0x03, 0x3f, + 0xa4, 0xe8, 0x37, 0xd0, 0x43, 0x46, 0x58, 0x2c, 0xe9, 0xab, 0xbd, 0x92, 0x44, 0x8d, 0xd5, 0xe7, + 0x05, 0x8e, 0xa8, 0xc3, 0x0e, 0xe8, 0xd1, 0x19, 0x19, 0x50, 0x14, 0x9a, 0x5e, 0x9b, 0x39, 0xae, + 0xe0, 0x65, 0x2f, 0x3e, 0x2a, 0xe8, 0x00, 0xf4, 0x06, 0xd7, 0x67, 0xaa, 0xa8, 0x04, 0xf9, 0xa6, + 0xdd, 0x68, 0x9a, 0x1a, 0x0f, 0x72, 0x95, 0xaf, 0xcd, 0x3c, 0x87, 0xf7, 0x9e, 0x0f, 0xde, 0x88, + 0xa3, 0x8e, 0x5f, 0x40, 0xe9, 0x19, 0x65, 0xc4, 0x25, 0x8c, 0xa0, 0x33, 0x28, 0x8f, 0x17, 0xf3, + 0xd1, 0x1b, 0xe2, 0xba, 0x0b, 0x1a, 0x86, 0x91, 0xce, 0x03, 0xc7, 0xe0, 0xb1, 0x86, 0x08, 0x71, + 0xc8, 0x84, 0xb1, 0x79, 0x02, 0x51, 0x05, 0x84, 0xc7, 0x24, 0x04, 0xbf, 0x83, 0x3c, 0x9f, 0x86, + 0x43, 0x87, 0x53, 0xdf, 0xdd, 0x64, 0xe3, 0xb1, 0x98, 0xed, 0x6e, 0xbc, 0x11, 0x41, 0x23, 0x0e, + 0xe8, 0x31, 0x94, 0x3c, 0x29, 0xe9, 0x58, 0x3b, 0x55, 0xaa, 0x46, 0xfd, 0x6e, 0x7a, 0x55, 0xb1, + 0x5c, 0x27, 0x41, 0xe1, 0x0f, 0x0a, 0x14, 0x2f, 0x66, 0xcb, 0x90, 0xd1, 0x05, 0xfa, 0x16, 0x74, + 0x3f, 0x70, 0x29, 0xef, 0xa7, 0x55, 0x8d, 0xfa, 0xfd, 0x74, 0xa9, 0xc4, 0x44, 0xdb, 0x0e, 0x6d, + 0x9f, 0x2d, 0x56, 0x8e, 0x00, 0x57, 0x3a, 0x00, 0xeb, 0x20, 0x32, 0x41, 0xbb, 0xa2, 0x2b, 0xa9, + 0x98, 0xff, 0x44, 0x5f, 0x82, 0xfe, 0x9e, 0xcc, 0x96, 0x42, 0xa9, 0x51, 0x37, 0x37, 0xbf, 0x9d, + 0x23, 0xd2, 0x3f, 0xaa, 0xe7, 0x0a, 0x3e, 0x07, 0x93, 0x87, 0xda, 0xfe, 0xdb, 0x20, 0xf9, 0xf6, + 0x0f, 0x20, 0xcf, 0x1b, 0x45, 0x94, 0x59, 0xe5, 0x51, 0x16, 0x77, 0x00, 0x49, 0x89, 0x9d, 0x60, + 0xea, 0xc7, 0x57, 0xf9, 0x16, 0xa8, 0x53, 0x57, 0x8a, 0x51, 0xa7, 0x6e, 0xc2, 0xa5, 0xee, 0xe5, + 0xfa, 0x02, 0xee, 0x48, 0xae, 0x2e, 0x25, 0xef, 0xe9, 0x0e, 0x32, 0xdc, 0x4c, 0x60, 0xd7, 0xf4, + 0x3e, 0x82, 0xe2, 0x48, 0x84, 0xa5, 0xe4, 0x3b, 0x19, 0x7b, 0x74, 0x62, 0x0c, 0xbe, 0x0f, 0x70, + 0x49, 0x59, 0xdc, 0x63, 0x6b, 0x7d, 0xf8, 0x07, 0x30, 0xa2, 0xbc, 0x64, 0xff, 0x3a, 0xde, 0xa6, + 0x22, 0x3f, 0xaf, 0x30, 0xb9, 0x15, 0x9b, 0xdc, 0x6a, 0xf8, 0x2b, 0xb9, 0x51, 0xdc, 0x01, 0xe8, + 0xef, 
0xa1, 0x5e, 0x73, 0xa9, 0x37, 0x73, 0x9d, 0xc1, 0x61, 0x93, 0xce, 0x28, 0xa3, 0xbb, 0x95, + 0x9e, 0x42, 0xf9, 0x15, 0x61, 0xa3, 0xc9, 0x6e, 0xc4, 0xbf, 0x0a, 0x1c, 0x4a, 0x88, 0x1c, 0xe7, + 0x27, 0x28, 0x8e, 0x02, 0xcf, 0x23, 0xbe, 0x2b, 0xad, 0x7d, 0x96, 0x5e, 0xd6, 0x35, 0xac, 0x75, + 0x21, 0x80, 0x4e, 0x5c, 0x11, 0x37, 0x50, 0x33, 0x26, 0xd2, 0x6e, 0x9e, 0xe8, 0x21, 0x14, 0x25, + 0x23, 0x7f, 0x14, 0x5e, 0xf6, 0x9e, 0xf6, 0x9e, 0xbf, 0xea, 0x99, 0x39, 0x54, 0x04, 0xad, 0x6f, + 0x0f, 0x4c, 0x05, 0x01, 0x14, 0x9a, 0x76, 0xd7, 0x1e, 0xd8, 0xa6, 0x5a, 0xff, 0x50, 0x00, 0x78, + 0x96, 0x08, 0x43, 0x7f, 0xc1, 0xed, 0x8d, 0x67, 0x07, 0xe1, 0x9b, 0x9f, 0xc9, 0xca, 0xe7, 0xff, + 0xe3, 0xdd, 0xc2, 0x39, 0xf4, 0x04, 0x4a, 0xb1, 0x0b, 0xd0, 0xd1, 0xd6, 0x08, 0x36, 0x7f, 0xc5, + 0x2b, 0x9f, 0x6e, 0x52, 0xa5, 0xef, 0x20, 0xce, 0xa1, 0x4b, 0x30, 0x52, 0x7e, 0x40, 0x59, 0x5e, + 0x4e, 0x19, 0xa5, 0xb2, 0xa3, 0x0d, 0xce, 0xa1, 0x36, 0x94, 0xd3, 0x66, 0x40, 0x9f, 0x65, 0x30, + 0xa5, 0x6d, 0xb2, 0x87, 0xaa, 0x95, 0x68, 0xda, 0x3b, 0x5a, 0x56, 0x87, 0x8d, 0xe9, 0x9e, 0x26, + 0xa2, 0xa2, 0x2b, 0xf2, 0x11, 0x54, 0x8f, 0x15, 0x74, 0x0e, 0xda, 0x25, 0x65, 0xe8, 0x28, 0x8d, + 0x5d, 0x5b, 0xb2, 0x72, 0x6f, 0x2b, 0x9e, 0xc8, 0xf8, 0x1e, 0xb4, 0xfe, 0x66, 0xe5, 0xda, 0x71, + 0x7b, 0x36, 0xf1, 0x0b, 0x14, 0x84, 0x9b, 0xd0, 0x49, 0xba, 0xf6, 0x9a, 0xc3, 0xf6, 0x94, 0xff, + 0x0e, 0xba, 0x98, 0xfb, 0x38, 0xc3, 0x2d, 0xa2, 0xf8, 0x64, 0xa7, 0x8f, 0xa2, 0x99, 0x7f, 0x86, + 0x52, 0xdf, 0x27, 0xf3, 0x70, 0x12, 0xb0, 0x9d, 0xcb, 0xdb, 0xd9, 0xff, 0xc9, 0xa3, 0x3f, 0x1f, + 0x8e, 0xa7, 0x6c, 0xb2, 0x1c, 0x5a, 0xa3, 0xc0, 0xab, 0x79, 0x41, 0xb8, 0xbc, 0x22, 0xb5, 0xe1, + 0x8c, 0x84, 0xac, 0x96, 0xf1, 0x07, 0x66, 0x58, 0x88, 0x82, 0xdf, 0xfc, 0x17, 0x00, 0x00, 0xff, + 0xff, 0xf6, 0x49, 0xd2, 0xd3, 0xde, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -896,7 +855,7 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ManagementClient interface { NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - NodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfoResponse, error) + NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) @@ -925,7 +884,7 @@ func (c *managementClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCh return out, nil } -func (c *managementClient) NodeInfo(ctx context.Context, in *NodeInfoRequest, opts ...grpc.CallOption) (*NodeInfoResponse, error) { +func (c *managementClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { out := new(NodeInfoResponse) err := c.cc.Invoke(ctx, "/management.Management/NodeInfo", in, out, opts...) if err != nil { @@ -1064,7 +1023,7 @@ func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts . // ManagementServer is the server API for Management service. 
type ManagementServer interface { NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - NodeInfo(context.Context, *NodeInfoRequest) (*NodeInfoResponse, error) + NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) @@ -1099,7 +1058,7 @@ func _Management_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, d } func _Management_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeInfoRequest) + in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } @@ -1111,7 +1070,7 @@ func _Management_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func FullMethod: "/management.Management/NodeInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).NodeInfo(ctx, req.(*NodeInfoRequest)) + return srv.(ManagementServer).NodeInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 0e33f27..9f87fa7 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -23,7 +23,7 @@ option go_package = "github.com/mosuka/blast/protobuf/management"; service Management { rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} - rpc NodeInfo (NodeInfoRequest) returns (NodeInfoResponse) {} + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} @@ -66,7 +66,7 @@ message Metadata { message Node { 
string bind_address = 1; - string status = 2; + string state = 2; Metadata metadata = 3; } @@ -74,10 +74,6 @@ message Cluster { map nodes = 1; } -message NodeInfoRequest { - string id = 1; -} - message NodeInfoResponse { Node node = 1; } From 071d61e6f4a1f881a8db7b7c4ab174c2207400b3 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 16:59:31 +0900 Subject: [PATCH 11/76] Change the cluster watching method (#90) --- cmd/blast/manager_cluster_watch.go | 2 +- manager/grpc_service.go | 170 +++++++++++++------ protobuf/management/management.pb.go | 235 +++++++++++++++++++-------- protobuf/management/management.proto | 16 +- 4 files changed, 303 insertions(+), 120 deletions(-) diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index 0e74722..c44bb24 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -59,7 +59,7 @@ func managerClusterWatch(c *cli.Context) error { break } - clusterBytes, err := json.MarshalIndent(resp.Cluster, "", " ") + clusterBytes, err := json.MarshalIndent(resp, "", " ") if err != nil { return err } diff --git a/manager/grpc_service.go b/manager/grpc_service.go index e02c2c4..300a006 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -16,6 +16,7 @@ package manager import ( "context" + "encoding/json" "errors" "strings" "sync" @@ -43,7 +44,7 @@ type GRPCService struct { peers *management.Cluster peerClients map[string]*GRPCClient cluster *management.Cluster - clusterChans map[chan management.ClusterInfoResponse]struct{} + clusterChans map[chan management.ClusterWatchResponse]struct{} clusterMutex sync.RWMutex stateChans map[chan management.WatchResponse]struct{} @@ -58,7 +59,7 @@ func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, e peers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, peerClients: make(map[string]*GRPCClient, 0), cluster: &management.Cluster{Nodes: make(map[string]*management.Node, 
0)}, - clusterChans: make(map[chan management.ClusterInfoResponse]struct{}), + clusterChans: make(map[chan management.ClusterWatchResponse]struct{}), stateChans: make(map[chan management.WatchResponse]struct{}), }, nil @@ -110,6 +111,21 @@ func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { return client, nil } +func (s *GRPCService) cloneCluster(cluster *management.Cluster) (*management.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + + var clone *management.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil +} + func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.updateClusterStopCh = make(chan struct{}) s.updateClusterDoneCh = make(chan struct{}) @@ -128,6 +144,12 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { return } + savedCluster, err := s.cloneCluster(s.cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + peersHash, err := hashutils.Hash(s.peers) if err != nil { s.logger.Error(err.Error()) @@ -153,8 +175,14 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { return } + snapshotCluster, err := s.cloneCluster(s.cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create peer node list with out self node - for id, node := range s.cluster.Nodes { + for id, node := range snapshotCluster.Nodes { if id != s.NodeID() { s.peers.Nodes[id] = node } @@ -221,17 +249,69 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // compare cluster hash if !cmp.Equal(clusterHash, newClusterHash) { - clusterResp := &management.ClusterInfoResponse{ - Cluster: s.cluster, + // check joined and updated nodes + for id, node := range snapshotCluster.Nodes { + nodeSnapshot, exist := savedCluster.Nodes[id] + if exist { + // node updated + n1, err := json.Marshal(node) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), 
zap.Any("node", node)) + } + n2, err := json.Marshal(nodeSnapshot) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + } + if !cmp.Equal(n1, n2) { + // notify the cluster changes + clusterResp := &management.ClusterWatchResponse{ + Event: management.ClusterWatchResponse_UPDATE, + Id: id, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } + } else { + // no change + } + } else { + // node joined + // notify the cluster changes + clusterResp := &management.ClusterWatchResponse{ + Event: management.ClusterWatchResponse_JOIN, + Id: id, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } + } } - // output to channel - for c := range s.clusterChans { - c <- *clusterResp + // check left nodes + for id, node := range savedCluster.Nodes { + if _, exist := snapshotCluster.Nodes[id]; !exist { + // node left + // notify the cluster changes + clusterResp := &management.ClusterWatchResponse{ + Event: management.ClusterWatchResponse_LEAVE, + Id: id, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } + } } // update cluster hash clusterHash = newClusterHash + + savedCluster = snapshotCluster } default: time.Sleep(100 * time.Millisecond) @@ -278,73 +358,61 @@ func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } -func (s *GRPCService) getSelfNode() (*management.Node, error) { +func (s *GRPCService) getSelfNode() *management.Node { node := s.raftServer.node node.State = s.raftServer.State().String() - return node, nil + return node } func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { - var nodeInfo *management.Node - var err error + if _, exist := s.peerClients[id]; !exist { + err := errors.New("node does not exist in peers") + s.logger.Debug(err.Error(), zap.String("id", id)) + return nil, err + } - if peerClient, exist := 
s.peerClients[id]; exist { - nodeInfo, err = peerClient.NodeInfo() - if err != nil { - s.logger.Debug(err.Error()) - nodeInfo = &management.Node{ - BindAddress: "", - State: raft.Shutdown.String(), - Metadata: &management.Metadata{}, - } - } - } else { - nodeInfo = &management.Node{ + node, err := s.peerClients[id].NodeInfo() + if err != nil { + s.logger.Debug(err.Error(), zap.String("id", id)) + return &management.Node{ BindAddress: "", State: raft.Shutdown.String(), - Metadata: &management.Metadata{}, - } + Metadata: &management.Metadata{ + GrpcAddress: "", + HttpAddress: "", + }, + }, nil } - return nodeInfo, nil + return node, nil } func (s *GRPCService) getNode(id string) (*management.Node, error) { - var nodeInfo *management.Node - var err error - if id == "" || id == s.NodeID() { - nodeInfo, err = s.getSelfNode() + return s.getSelfNode(), nil } else { - nodeInfo, err = s.getPeerNode(id) - } - - if err != nil { - s.logger.Error(err.Error()) - return nil, err + return s.getPeerNode(id) } - - return nodeInfo, nil } func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*management.NodeInfoResponse, error) { resp := &management.NodeInfoResponse{} - nodeInfo, err := s.getNode(s.NodeID()) + node, err := s.getNode(s.NodeID()) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - resp.Node = nodeInfo - - return resp, nil + return &management.NodeInfoResponse{ + Node: node, + }, nil } -func (s *GRPCService) setNode(id string, nodeConfig *management.Node) error { +func (s *GRPCService) setNode(id string, node *management.Node) error { if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, nodeConfig) + err := s.raftServer.SetNode(id, node) if err != nil { s.logger.Error(err.Error()) return err @@ -356,7 +424,7 @@ func (s *GRPCService) setNode(id string, nodeConfig *management.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(id, nodeConfig) + err = 
client.ClusterJoin(id, node) if err != nil { s.logger.Error(err.Error()) return err @@ -422,13 +490,13 @@ func (s *GRPCService) getCluster() (*management.Cluster, error) { } // update latest node state - for nodeId := range cluster.Nodes { - node, err := s.getNode(nodeId) + for id := range cluster.Nodes { + node, err := s.getNode(id) if err != nil { - s.logger.Warn(err.Error()) + s.logger.Debug(err.Error()) continue } - cluster.Nodes[nodeId].State = node.State + cluster.Nodes[id] = node } return cluster, nil @@ -449,7 +517,7 @@ func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*manag } func (s *GRPCService) ClusterWatch(req *empty.Empty, server management.Management_ClusterWatchServer) error { - chans := make(chan management.ClusterInfoResponse) + chans := make(chan management.ClusterWatchResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 8b8235c..f5cf092 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -89,6 +89,37 @@ func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{1, 0} } +type ClusterWatchResponse_Event int32 + +const ( + ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 + ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 + ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 + ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 +) + +var ClusterWatchResponse_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "JOIN", + 2: "LEAVE", + 3: "UPDATE", +} + +var ClusterWatchResponse_Event_value = map[string]int32{ + "UNKNOWN": 0, + "JOIN": 1, + "LEAVE": 2, + "UPDATE": 3, +} + +func (x ClusterWatchResponse_Event) String() string { + return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) +} + +func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_5e030ad796566078, []int{9, 0} +} + type WatchResponse_Command int32 const ( @@ -114,7 +145,7 @@ func (x WatchResponse_Command) String() string { } func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14, 0} + return fileDescriptor_5e030ad796566078, []int{15, 0} } type NodeHealthCheckRequest struct { @@ -501,6 +532,69 @@ func (m *ClusterInfoResponse) GetCluster() *Cluster { return nil } +type ClusterWatchResponse struct { + Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.ClusterWatchResponse_Event" json:"event,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Node *Node `protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` + Cluster *Cluster `protobuf:"bytes,4,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } +func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterWatchResponse) ProtoMessage() {} +func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{9} +} + +func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) +} +func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) +} +func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWatchResponse.Merge(m, src) +} +func (m *ClusterWatchResponse) XXX_Size() int { + return xxx_messageInfo_ClusterWatchResponse.Size(m) +} +func (m *ClusterWatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo + +func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { + if m != nil { + return m.Event + } + return ClusterWatchResponse_UNKNOWN +} + +func (m *ClusterWatchResponse) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ClusterWatchResponse) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ClusterWatchResponse) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + type GetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -512,7 +606,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9} + return fileDescriptor_5e030ad796566078, []int{10} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { @@ -551,7 +645,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} + return fileDescriptor_5e030ad796566078, []int{11} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { @@ -591,7 +685,7 @@ func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} + return fileDescriptor_5e030ad796566078, []int{12} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { @@ -637,7 +731,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return 
proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} + return fileDescriptor_5e030ad796566078, []int{13} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -676,7 +770,7 @@ func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (m *WatchRequest) String() string { return proto.CompactTextString(m) } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} + return fileDescriptor_5e030ad796566078, []int{14} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { @@ -717,7 +811,7 @@ func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} + return fileDescriptor_5e030ad796566078, []int{15} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { @@ -762,6 +856,7 @@ func (m *WatchResponse) GetValue() *any.Any { func init() { proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") @@ -773,6 +868,7 @@ func init() { proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") 
proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") + proto.RegisterType((*ClusterWatchResponse)(nil), "management.ClusterWatchResponse") proto.RegisterType((*GetRequest)(nil), "management.GetRequest") proto.RegisterType((*GetResponse)(nil), "management.GetResponse") proto.RegisterType((*SetRequest)(nil), "management.SetRequest") @@ -786,60 +882,65 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 841 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x7f, 0x8f, 0xda, 0x46, - 0x10, 0xc5, 0x36, 0x06, 0x6e, 0xcc, 0x25, 0xd6, 0x26, 0xba, 0xdc, 0xd1, 0x2a, 0xbd, 0xdb, 0xa6, - 0x15, 0x6d, 0x14, 0x13, 0xd1, 0x56, 0xbd, 0xfe, 0x2e, 0x39, 0xac, 0x03, 0x42, 0x48, 0x64, 0x48, - 0xa3, 0x54, 0x95, 0xa2, 0x05, 0x6f, 0x00, 0x1d, 0xb6, 0x09, 0x5e, 0x22, 0xf1, 0x19, 0xf2, 0x47, - 0xbf, 0x49, 0xff, 0xed, 0xd7, 0xab, 0xd6, 0xbb, 0x36, 0x3e, 0x30, 0x5c, 0xa5, 0xfc, 0xc7, 0xce, - 0xbc, 0x79, 0xf3, 0x66, 0xbc, 0x6f, 0x05, 0x3c, 0x98, 0x2f, 0x02, 0x16, 0x0c, 0x97, 0x6f, 0x6b, - 0x1e, 0xf1, 0xc9, 0x98, 0x7a, 0xd4, 0x67, 0xa9, 0x9f, 0x56, 0x94, 0x46, 0xb0, 0x8e, 0x54, 0x4e, - 0xc6, 0x41, 0x30, 0x9e, 0xd1, 0x5a, 0x52, 0x48, 0xfc, 0x95, 0x80, 0x55, 0x3e, 0xd9, 0x4c, 0x51, - 0x6f, 0xce, 0x64, 0x12, 0xff, 0xad, 0xc0, 0x51, 0x2f, 0x70, 0x69, 0x8b, 0x92, 0x19, 0x9b, 0x5c, - 0x4c, 0xe8, 0xe8, 0xca, 0xa1, 0xef, 0x96, 0x34, 0x64, 0xe8, 0x57, 0xd0, 0xe7, 0x8b, 0x60, 0x48, - 0x8f, 0x95, 0x53, 0xa5, 0x7a, 0xab, 0x5e, 0xb5, 0x52, 0x02, 0xb2, 0x4b, 0xac, 0x17, 0x1c, 0xef, - 0x88, 0x32, 0xfc, 0x1d, 0xe8, 0xd1, 0x19, 0xdd, 0x06, 0xa3, 0x65, 0x37, 0xba, 0x83, 0x56, 0xbb, - 0x67, 0xf7, 0xfb, 0x66, 0x0e, 0x95, 0xa1, 0xd4, 0x6d, 0xff, 0x61, 0x47, 0x27, 0x05, 0x1d, 0xc2, - 0x81, 0x63, 0x37, 0x9a, 0x22, 0xa9, 0xe2, 0x7f, 0x14, 0xb8, 0xb7, 0x45, 0x1f, 0xce, 0x03, 0x3f, - 0xa4, 0xe8, 0x37, 0xd0, 0x43, 0x46, 0x58, 0x2c, 
0xe9, 0xab, 0xbd, 0x92, 0x44, 0x8d, 0xd5, 0xe7, - 0x05, 0x8e, 0xa8, 0xc3, 0x0e, 0xe8, 0xd1, 0x19, 0x19, 0x50, 0x14, 0x9a, 0x5e, 0x9b, 0x39, 0xae, - 0xe0, 0x65, 0x2f, 0x3e, 0x2a, 0xe8, 0x00, 0xf4, 0x06, 0xd7, 0x67, 0xaa, 0xa8, 0x04, 0xf9, 0xa6, - 0xdd, 0x68, 0x9a, 0x1a, 0x0f, 0x72, 0x95, 0xaf, 0xcd, 0x3c, 0x87, 0xf7, 0x9e, 0x0f, 0xde, 0x88, - 0xa3, 0x8e, 0x5f, 0x40, 0xe9, 0x19, 0x65, 0xc4, 0x25, 0x8c, 0xa0, 0x33, 0x28, 0x8f, 0x17, 0xf3, - 0xd1, 0x1b, 0xe2, 0xba, 0x0b, 0x1a, 0x86, 0x91, 0xce, 0x03, 0xc7, 0xe0, 0xb1, 0x86, 0x08, 0x71, - 0xc8, 0x84, 0xb1, 0x79, 0x02, 0x51, 0x05, 0x84, 0xc7, 0x24, 0x04, 0xbf, 0x83, 0x3c, 0x9f, 0x86, - 0x43, 0x87, 0x53, 0xdf, 0xdd, 0x64, 0xe3, 0xb1, 0x98, 0xed, 0x6e, 0xbc, 0x11, 0x41, 0x23, 0x0e, - 0xe8, 0x31, 0x94, 0x3c, 0x29, 0xe9, 0x58, 0x3b, 0x55, 0xaa, 0x46, 0xfd, 0x6e, 0x7a, 0x55, 0xb1, - 0x5c, 0x27, 0x41, 0xe1, 0x0f, 0x0a, 0x14, 0x2f, 0x66, 0xcb, 0x90, 0xd1, 0x05, 0xfa, 0x16, 0x74, - 0x3f, 0x70, 0x29, 0xef, 0xa7, 0x55, 0x8d, 0xfa, 0xfd, 0x74, 0xa9, 0xc4, 0x44, 0xdb, 0x0e, 0x6d, - 0x9f, 0x2d, 0x56, 0x8e, 0x00, 0x57, 0x3a, 0x00, 0xeb, 0x20, 0x32, 0x41, 0xbb, 0xa2, 0x2b, 0xa9, - 0x98, 0xff, 0x44, 0x5f, 0x82, 0xfe, 0x9e, 0xcc, 0x96, 0x42, 0xa9, 0x51, 0x37, 0x37, 0xbf, 0x9d, - 0x23, 0xd2, 0x3f, 0xaa, 0xe7, 0x0a, 0x3e, 0x07, 0x93, 0x87, 0xda, 0xfe, 0xdb, 0x20, 0xf9, 0xf6, - 0x0f, 0x20, 0xcf, 0x1b, 0x45, 0x94, 0x59, 0xe5, 0x51, 0x16, 0x77, 0x00, 0x49, 0x89, 0x9d, 0x60, - 0xea, 0xc7, 0x57, 0xf9, 0x16, 0xa8, 0x53, 0x57, 0x8a, 0x51, 0xa7, 0x6e, 0xc2, 0xa5, 0xee, 0xe5, - 0xfa, 0x02, 0xee, 0x48, 0xae, 0x2e, 0x25, 0xef, 0xe9, 0x0e, 0x32, 0xdc, 0x4c, 0x60, 0xd7, 0xf4, - 0x3e, 0x82, 0xe2, 0x48, 0x84, 0xa5, 0xe4, 0x3b, 0x19, 0x7b, 0x74, 0x62, 0x0c, 0xbe, 0x0f, 0x70, - 0x49, 0x59, 0xdc, 0x63, 0x6b, 0x7d, 0xf8, 0x07, 0x30, 0xa2, 0xbc, 0x64, 0xff, 0x3a, 0xde, 0xa6, - 0x22, 0x3f, 0xaf, 0x30, 0xb9, 0x15, 0x9b, 0xdc, 0x6a, 0xf8, 0x2b, 0xb9, 0x51, 0xdc, 0x01, 0xe8, - 0xef, 0xa1, 0x5e, 0x73, 0xa9, 0x37, 0x73, 0x9d, 0xc1, 0x61, 0x93, 0xce, 0x28, 0xa3, 
0xbb, 0x95, - 0x9e, 0x42, 0xf9, 0x15, 0x61, 0xa3, 0xc9, 0x6e, 0xc4, 0xbf, 0x0a, 0x1c, 0x4a, 0x88, 0x1c, 0xe7, - 0x27, 0x28, 0x8e, 0x02, 0xcf, 0x23, 0xbe, 0x2b, 0xad, 0x7d, 0x96, 0x5e, 0xd6, 0x35, 0xac, 0x75, - 0x21, 0x80, 0x4e, 0x5c, 0x11, 0x37, 0x50, 0x33, 0x26, 0xd2, 0x6e, 0x9e, 0xe8, 0x21, 0x14, 0x25, - 0x23, 0x7f, 0x14, 0x5e, 0xf6, 0x9e, 0xf6, 0x9e, 0xbf, 0xea, 0x99, 0x39, 0x54, 0x04, 0xad, 0x6f, - 0x0f, 0x4c, 0x05, 0x01, 0x14, 0x9a, 0x76, 0xd7, 0x1e, 0xd8, 0xa6, 0x5a, 0xff, 0x50, 0x00, 0x78, - 0x96, 0x08, 0x43, 0x7f, 0xc1, 0xed, 0x8d, 0x67, 0x07, 0xe1, 0x9b, 0x9f, 0xc9, 0xca, 0xe7, 0xff, - 0xe3, 0xdd, 0xc2, 0x39, 0xf4, 0x04, 0x4a, 0xb1, 0x0b, 0xd0, 0xd1, 0xd6, 0x08, 0x36, 0x7f, 0xc5, - 0x2b, 0x9f, 0x6e, 0x52, 0xa5, 0xef, 0x20, 0xce, 0xa1, 0x4b, 0x30, 0x52, 0x7e, 0x40, 0x59, 0x5e, - 0x4e, 0x19, 0xa5, 0xb2, 0xa3, 0x0d, 0xce, 0xa1, 0x36, 0x94, 0xd3, 0x66, 0x40, 0x9f, 0x65, 0x30, - 0xa5, 0x6d, 0xb2, 0x87, 0xaa, 0x95, 0x68, 0xda, 0x3b, 0x5a, 0x56, 0x87, 0x8d, 0xe9, 0x9e, 0x26, - 0xa2, 0xa2, 0x2b, 0xf2, 0x11, 0x54, 0x8f, 0x15, 0x74, 0x0e, 0xda, 0x25, 0x65, 0xe8, 0x28, 0x8d, - 0x5d, 0x5b, 0xb2, 0x72, 0x6f, 0x2b, 0x9e, 0xc8, 0xf8, 0x1e, 0xb4, 0xfe, 0x66, 0xe5, 0xda, 0x71, - 0x7b, 0x36, 0xf1, 0x0b, 0x14, 0x84, 0x9b, 0xd0, 0x49, 0xba, 0xf6, 0x9a, 0xc3, 0xf6, 0x94, 0xff, - 0x0e, 0xba, 0x98, 0xfb, 0x38, 0xc3, 0x2d, 0xa2, 0xf8, 0x64, 0xa7, 0x8f, 0xa2, 0x99, 0x7f, 0x86, - 0x52, 0xdf, 0x27, 0xf3, 0x70, 0x12, 0xb0, 0x9d, 0xcb, 0xdb, 0xd9, 0xff, 0xc9, 0xa3, 0x3f, 0x1f, - 0x8e, 0xa7, 0x6c, 0xb2, 0x1c, 0x5a, 0xa3, 0xc0, 0xab, 0x79, 0x41, 0xb8, 0xbc, 0x22, 0xb5, 0xe1, - 0x8c, 0x84, 0xac, 0x96, 0xf1, 0x07, 0x66, 0x58, 0x88, 0x82, 0xdf, 0xfc, 0x17, 0x00, 0x00, 0xff, - 0xff, 0xf6, 0x49, 0xd2, 0xd3, 0xde, 0x08, 0x00, 0x00, + // 920 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x7f, 0x6f, 0xda, 0x46, + 0x18, 0xc6, 0x36, 0x0e, 0xe4, 0x25, 0x69, 0xad, 0x6b, 0x94, 0x26, 0x6c, 0xea, 0x92, 0x5b, 0x57, + 0x65, 0xab, 0x0a, 
0x15, 0x5b, 0xb5, 0x6c, 0xeb, 0x7e, 0xd0, 0x60, 0x25, 0x30, 0x4a, 0x22, 0x43, + 0x56, 0x75, 0x9a, 0x54, 0x1d, 0xf8, 0x0a, 0x28, 0xd8, 0xa6, 0xf8, 0x88, 0x94, 0xcf, 0xb0, 0x49, + 0xfb, 0x26, 0xfb, 0x77, 0x5f, 0x6b, 0x1f, 0x61, 0x3a, 0xdf, 0xd9, 0x18, 0xb0, 0x4d, 0xff, 0xe3, + 0xde, 0x7b, 0x9e, 0xe7, 0x9e, 0xf7, 0xbd, 0x7b, 0x5f, 0x03, 0x8f, 0xa7, 0x33, 0x8f, 0x79, 0xfd, + 0xf9, 0xfb, 0xaa, 0x43, 0x5c, 0x32, 0xa4, 0x0e, 0x75, 0x59, 0xec, 0x67, 0x25, 0xd8, 0x46, 0xb0, + 0x88, 0x94, 0x0f, 0x87, 0x9e, 0x37, 0x9c, 0xd0, 0x6a, 0x44, 0x24, 0xee, 0x9d, 0x80, 0x95, 0x3f, + 0x59, 0xdd, 0xa2, 0xce, 0x94, 0xc9, 0x4d, 0xfc, 0xb7, 0x02, 0xfb, 0x1d, 0xcf, 0xa6, 0x17, 0x94, + 0x4c, 0xd8, 0xe8, 0x6c, 0x44, 0x07, 0x37, 0x16, 0xfd, 0x30, 0xa7, 0x3e, 0x43, 0x3f, 0x81, 0x3e, + 0x9d, 0x79, 0x7d, 0x7a, 0xa0, 0x1c, 0x29, 0x27, 0xf7, 0x6a, 0x27, 0x95, 0x98, 0x81, 0x64, 0x4a, + 0xe5, 0x8a, 0xe3, 0x2d, 0x41, 0xc3, 0x2f, 0x40, 0x0f, 0xd6, 0xe8, 0x3e, 0x94, 0x2e, 0xcc, 0x7a, + 0xbb, 0x77, 0xd1, 0xec, 0x98, 0xdd, 0xae, 0x91, 0x43, 0x3b, 0x50, 0x6c, 0x37, 0x7f, 0x33, 0x83, + 0x95, 0x82, 0x76, 0x61, 0xdb, 0x32, 0xeb, 0x0d, 0xb1, 0xa9, 0xe2, 0x7f, 0x14, 0x78, 0xb8, 0x26, + 0xef, 0x4f, 0x3d, 0xd7, 0xa7, 0xe8, 0x67, 0xd0, 0x7d, 0x46, 0x58, 0x68, 0xe9, 0xcb, 0x4c, 0x4b, + 0x82, 0x53, 0xe9, 0x72, 0x82, 0x25, 0x78, 0xd8, 0x02, 0x3d, 0x58, 0xa3, 0x12, 0x14, 0x84, 0xa7, + 0xb7, 0x46, 0x8e, 0x3b, 0xb8, 0xee, 0x84, 0x4b, 0x05, 0x6d, 0x83, 0x5e, 0xe7, 0xfe, 0x0c, 0x15, + 0x15, 0x21, 0xdf, 0x30, 0xeb, 0x0d, 0x43, 0xe3, 0x41, 0xee, 0xf2, 0xad, 0x91, 0xe7, 0xf0, 0xce, + 0x65, 0xef, 0x9d, 0x58, 0xea, 0xf8, 0x0a, 0x8a, 0xaf, 0x29, 0x23, 0x36, 0x61, 0x04, 0x1d, 0xc3, + 0xce, 0x70, 0x36, 0x1d, 0xbc, 0x23, 0xb6, 0x3d, 0xa3, 0xbe, 0x1f, 0xf8, 0xdc, 0xb6, 0x4a, 0x3c, + 0x56, 0x17, 0x21, 0x0e, 0x19, 0x31, 0x36, 0x8d, 0x20, 0xaa, 0x80, 0xf0, 0x98, 0x84, 0xe0, 0x0f, + 0x90, 0xe7, 0xd9, 0x70, 0x68, 0x7f, 0xec, 0xda, 0xab, 0x6a, 0x3c, 0x16, 0xaa, 0xed, 0x85, 0x15, + 0x11, 0x32, 0x62, 0x81, 0x9e, 0x43, 0xd1, 0x91, 0x96, 
0x0e, 0xb4, 0x23, 0xe5, 0xa4, 0x54, 0xdb, + 0x8b, 0x97, 0x2a, 0xb4, 0x6b, 0x45, 0x28, 0xfc, 0xa7, 0x02, 0x85, 0xb3, 0xc9, 0xdc, 0x67, 0x74, + 0x86, 0xbe, 0x01, 0xdd, 0xf5, 0x6c, 0xca, 0xcf, 0xd3, 0x4e, 0x4a, 0xb5, 0x47, 0x71, 0xaa, 0xc4, + 0x04, 0xd5, 0xf6, 0x4d, 0x97, 0xcd, 0xee, 0x2c, 0x01, 0x2e, 0xb7, 0x00, 0x16, 0x41, 0x64, 0x80, + 0x76, 0x43, 0xef, 0xa4, 0x63, 0xfe, 0x13, 0x3d, 0x01, 0xfd, 0x96, 0x4c, 0xe6, 0xc2, 0x69, 0xa9, + 0x66, 0xac, 0xde, 0x9d, 0x25, 0xb6, 0xbf, 0x57, 0x4f, 0x15, 0x7c, 0x0a, 0x06, 0x0f, 0x35, 0xdd, + 0xf7, 0x5e, 0x74, 0xf7, 0x8f, 0x21, 0xcf, 0x0f, 0x0a, 0x24, 0x93, 0xe8, 0xc1, 0x2e, 0x6e, 0x01, + 0x92, 0x16, 0x5b, 0xde, 0xd8, 0x0d, 0x9f, 0xf2, 0x3d, 0x50, 0xc7, 0xb6, 0x34, 0xa3, 0x8e, 0xed, + 0x48, 0x4b, 0xcd, 0xd4, 0xfa, 0x02, 0x1e, 0x48, 0xad, 0x36, 0x25, 0xb7, 0x34, 0x45, 0x0c, 0x37, + 0x22, 0xd8, 0x92, 0xdf, 0x67, 0x50, 0x18, 0x88, 0xb0, 0xb4, 0xfc, 0x20, 0xa1, 0x8e, 0x56, 0x88, + 0xc1, 0xff, 0x29, 0xb0, 0x27, 0x83, 0x6f, 0x08, 0x1b, 0x8c, 0x22, 0x9d, 0x97, 0xa0, 0xd3, 0x5b, + 0xea, 0x32, 0xf9, 0xe6, 0x9f, 0x24, 0xa8, 0x2c, 0x11, 0x2a, 0x26, 0x47, 0x5b, 0x82, 0x24, 0xcd, + 0xaa, 0x6b, 0x99, 0x6b, 0x59, 0x99, 0xc7, 0xbd, 0xe7, 0x3f, 0xc2, 0xfb, 0x0b, 0xd0, 0x83, 0x43, + 0x79, 0x57, 0x5d, 0x77, 0x7e, 0xed, 0x5c, 0xbe, 0xe9, 0x18, 0x39, 0xde, 0x3b, 0xad, 0xcb, 0x66, + 0x47, 0x34, 0x54, 0xdb, 0xac, 0x07, 0x0d, 0x05, 0xb0, 0x75, 0x7d, 0xd5, 0xa8, 0xf7, 0x4c, 0x43, + 0xc3, 0x8f, 0x00, 0xce, 0x29, 0x0b, 0xcb, 0xba, 0xf6, 0x62, 0xf0, 0x77, 0x50, 0x0a, 0xf6, 0x65, + 0x21, 0xbe, 0x0a, 0x1f, 0x90, 0x22, 0x5f, 0xb4, 0x98, 0x6b, 0x95, 0x70, 0xae, 0x55, 0xea, 0xee, + 0x9d, 0x7c, 0x44, 0xb8, 0x05, 0xd0, 0xcd, 0x90, 0x5e, 0x68, 0xa9, 0x9b, 0xb5, 0x8e, 0x61, 0xb7, + 0x41, 0x27, 0x94, 0xd1, 0x74, 0xa7, 0x47, 0xb0, 0x23, 0xef, 0x20, 0x0d, 0xf1, 0xaf, 0x02, 0xbb, + 0xcb, 0xf7, 0xfa, 0x03, 0x14, 0x06, 0x9e, 0xe3, 0x10, 0xd7, 0x96, 0x37, 0x7b, 0x1c, 0xaf, 0xf1, + 0xf2, 0x95, 0x9e, 0x09, 0xa0, 0x15, 0x32, 0xc2, 0x03, 0xd4, 0x84, 0x8c, 0xb4, 0xcd, 0x19, 
0x3d, + 0x85, 0x82, 0x54, 0x5c, 0xbe, 0xb1, 0x02, 0x68, 0x5d, 0xb3, 0x67, 0x28, 0xfc, 0x96, 0x1a, 0x66, + 0xdb, 0xec, 0x99, 0x86, 0x5a, 0xfb, 0x6b, 0x0b, 0xe0, 0x75, 0x64, 0x0c, 0xfd, 0x01, 0xf7, 0x57, + 0x26, 0x2d, 0xc2, 0x9b, 0xbf, 0x0c, 0xe5, 0xcf, 0x3f, 0x62, 0x54, 0xe3, 0x1c, 0x7a, 0x05, 0xc5, + 0xb0, 0xf1, 0xd1, 0xfe, 0x5a, 0x0a, 0x26, 0xff, 0x70, 0x95, 0x3f, 0x5d, 0x95, 0x8a, 0xb7, 0x1d, + 0xce, 0xa1, 0x73, 0x28, 0xc5, 0x46, 0x00, 0x4a, 0x1a, 0x5f, 0xb1, 0xd9, 0x50, 0x4e, 0x39, 0x06, + 0xe7, 0x50, 0x13, 0x76, 0xe2, 0xfd, 0x8f, 0x3e, 0x4b, 0x50, 0x8a, 0x4f, 0x86, 0x0c, 0xa9, 0x8b, + 0xc8, 0x53, 0x66, 0x6a, 0x49, 0x27, 0xac, 0x64, 0xd7, 0x8e, 0x4c, 0x05, 0x4f, 0x24, 0x55, 0xea, + 0x68, 0xd3, 0x9c, 0xc0, 0xb9, 0xe7, 0x0a, 0x3a, 0x05, 0xed, 0x9c, 0x32, 0xb4, 0x1f, 0x07, 0x2f, + 0x7a, 0xb2, 0xfc, 0x70, 0x2d, 0x1e, 0xf9, 0xf8, 0x16, 0xb4, 0xee, 0x2a, 0x73, 0xd1, 0x72, 0x19, + 0xa5, 0xf8, 0x11, 0xb6, 0x44, 0x3b, 0xa1, 0xc3, 0x38, 0x77, 0xa9, 0xc5, 0x32, 0xe8, 0xbf, 0x80, + 0x2e, 0x12, 0x3f, 0x48, 0x68, 0x17, 0x41, 0x3e, 0x4c, 0x6d, 0xa4, 0x20, 0xe7, 0x97, 0x50, 0xec, + 0xba, 0x64, 0xea, 0x8f, 0x3c, 0x96, 0x5a, 0xbd, 0xd4, 0xf3, 0x5f, 0x3d, 0xfb, 0xfd, 0xe9, 0x70, + 0xcc, 0x46, 0xf3, 0x7e, 0x65, 0xe0, 0x39, 0x55, 0xc7, 0xf3, 0xe7, 0x37, 0xa4, 0xda, 0x9f, 0x10, + 0x9f, 0x55, 0x13, 0xfe, 0xb4, 0xf5, 0xb7, 0x82, 0xe0, 0xd7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xc2, 0xc8, 0x11, 0x5c, 0xd2, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -855,6 +956,7 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ManagementClient interface { NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) + // rpc NodeState (google.protobuf.Empty) returns (NodeStateResponse) {} NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) @@ -936,7 +1038,7 @@ func (c *managementClient) ClusterWatch(ctx context.Context, in *empty.Empty, op } type Management_ClusterWatchClient interface { - Recv() (*ClusterInfoResponse, error) + Recv() (*ClusterWatchResponse, error) grpc.ClientStream } @@ -944,8 +1046,8 @@ type managementClusterWatchClient struct { grpc.ClientStream } -func (x *managementClusterWatchClient) Recv() (*ClusterInfoResponse, error) { - m := new(ClusterInfoResponse) +func (x *managementClusterWatchClient) Recv() (*ClusterWatchResponse, error) { + m := new(ClusterWatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -1023,6 +1125,7 @@ func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts . // ManagementServer is the server API for Management service. 
type ManagementServer interface { NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) + // rpc NodeState (google.protobuf.Empty) returns (NodeStateResponse) {} NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) @@ -1138,7 +1241,7 @@ func _Management_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) } type Management_ClusterWatchServer interface { - Send(*ClusterInfoResponse) error + Send(*ClusterWatchResponse) error grpc.ServerStream } @@ -1146,7 +1249,7 @@ type managementClusterWatchServer struct { grpc.ServerStream } -func (x *managementClusterWatchServer) Send(m *ClusterInfoResponse) error { +func (x *managementClusterWatchServer) Send(m *ClusterWatchResponse) error { return x.ServerStream.SendMsg(m) } diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 9f87fa7..36cca52 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -28,7 +28,7 @@ service Management { rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} - rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterInfoResponse) {} + rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} rpc Get (GetRequest) returns (GetResponse) {} rpc Set (SetRequest) returns (google.protobuf.Empty) {} @@ -58,7 +58,6 @@ message NodeHealthCheckResponse { State state = 1; } -// use for raft message Metadata { string grpc_address = 1; string http_address = 2; @@ -91,6 +90,19 @@ message ClusterInfoResponse { Cluster cluster = 1; } +message ClusterWatchResponse { + enum Event { + UNKNOWN = 0; + JOIN = 1; + LEAVE = 2; + UPDATE = 3; + } + 
Event event = 1; + string id = 2; + Node node = 3; + Cluster cluster = 4; +} + message GetRequest { string key = 1; } From a43b28b4c31760465755dbb3a2b0c19f32dd4504 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 17:00:42 +0900 Subject: [PATCH 12/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 9ee2110..1a231ba 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change subcommands #85 - Update protobuf #86 - Change protobuf #87 +- Change the cluster watching method #90 ## [v0.7.1] - 2019-07-18 From 9cdc1dd20f54a5ea1319a4a7213c13986210803f Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 19:08:06 +0900 Subject: [PATCH 13/76] Refactoring (#91) --- manager/grpc_service.go | 191 ++++++++++++++++------------------------ 1 file changed, 75 insertions(+), 116 deletions(-) diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 300a006..2216077 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -27,7 +27,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/hashutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" @@ -137,25 +136,12 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { ticker := time.NewTicker(checkInterval) defer ticker.Stop() - // create initial cluster hash - clusterHash, err := hashutils.Hash(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - savedCluster, err := s.cloneCluster(s.cluster) if err != nil { s.logger.Error(err.Error()) return } - peersHash, err := hashutils.Hash(s.peers) - if err != nil { - s.logger.Error(err.Error()) - return - } - for { select { case <-s.updateClusterStopCh: @@ -168,13 +154,6 @@ func (s *GRPCService) 
startUpdateCluster(checkInterval time.Duration) { return } - // create latest cluster hash - newClusterHash, err := hashutils.Hash(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - snapshotCluster, err := s.cloneCluster(s.cluster) if err != nil { s.logger.Error(err.Error()) @@ -188,99 +167,70 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { } } - // create latest peers hash - newPeersHash, err := hashutils.Hash(s.peers) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // compare peers hash - if !cmp.Equal(peersHash, newPeersHash) { - // open clients - for id, node := range s.peers.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } + // open clients for peer nodes + for id, node := range s.peers.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } - client, exist := s.peerClients[id] - if exist { - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - delete(s.peerClients, id) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id)) - } - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient + client, exist := s.peerClients[id] + if exist { + if client.GetAddress() != node.Metadata.GrpcAddress { + s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + delete(s.peerClients, id) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id)) } - } else { 
- s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } s.peerClients[id] = newClient } + } else { + s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } + s.peerClients[id] = newClient } + } - // close client for non-existent node - for id, client := range s.peerClients { - if _, exist := s.peers.Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.peerClients, id) + // close clients for non-existent peer nodes + for id, client := range s.peerClients { + if _, exist := s.peers.Nodes[id]; !exist { + s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) } + delete(s.peerClients, id) } - - // update peers hash - peersHash = newPeersHash } - // compare cluster hash - if !cmp.Equal(clusterHash, newClusterHash) { - // check joined and updated nodes - for id, node := range snapshotCluster.Nodes { - nodeSnapshot, exist := savedCluster.Nodes[id] - if exist { + // check joined and updated nodes + for id, node := range snapshotCluster.Nodes { + nodeSnapshot, exist := 
savedCluster.Nodes[id] + if exist { + // node exists in the cluster + n1, err := json.Marshal(node) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) + } + n2, err := json.Marshal(nodeSnapshot) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + } + if !cmp.Equal(n1, n2) { // node updated - n1, err := json.Marshal(node) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) - } - n2, err := json.Marshal(nodeSnapshot) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) - } - if !cmp.Equal(n1, n2) { - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_UPDATE, - Id: id, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } else { - // no change - } - } else { - // node joined // notify the cluster changes clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_JOIN, + Event: management.ClusterWatchResponse_UPDATE, Id: id, Node: node, Cluster: snapshotCluster, @@ -289,30 +239,39 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { c <- *clusterResp } } + } else { + // node joined + // notify the cluster changes + clusterResp := &management.ClusterWatchResponse{ + Event: management.ClusterWatchResponse_JOIN, + Id: id, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } } + } - // check left nodes - for id, node := range savedCluster.Nodes { - if _, exist := snapshotCluster.Nodes[id]; !exist { - // node left - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_LEAVE, - Id: id, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } + // check left 
nodes + for id, node := range savedCluster.Nodes { + if _, exist := snapshotCluster.Nodes[id]; !exist { + // node left + // notify the cluster changes + clusterResp := &management.ClusterWatchResponse{ + Event: management.ClusterWatchResponse_LEAVE, + Id: id, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp } } - - // update cluster hash - clusterHash = newClusterHash - - savedCluster = snapshotCluster } + + savedCluster = snapshotCluster default: time.Sleep(100 * time.Millisecond) } From a3d13c4b5fe19dbacf2d92e2175d4f7510e4bbe1 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 19:48:31 +0900 Subject: [PATCH 14/76] Change cluster watch command for manager (#92) --- cmd/blast/manager_cluster_watch.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index c44bb24..bb5ae7c 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -22,6 +22,7 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -39,10 +40,21 @@ func managerClusterWatch(c *cli.Context) error { } }() - err = managerClusterInfo(c) + cluster, err := client.ClusterInfo() if err != nil { return err } + resp := &management.ClusterWatchResponse{ + Event: 0, + Id: "", + Node: nil, + Cluster: cluster, + } + clusterBytes, err := json.MarshalIndent(resp, "", " ") + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) watchClient, err := client.ClusterWatch() if err != nil { From e57d1e6da5f03b7e50cdae7dc4337dc8078c6f67 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 19:49:11 +0900 Subject: [PATCH 15/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 1a231ba..f9fbeed 100644 --- a/CHANGES.md +++ 
b/CHANGES.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Update protobuf #86 - Change protobuf #87 - Change the cluster watching method #90 +- Change cluster watch command for manager #92 ## [v0.7.1] - 2019-07-18 From a460112b37f2e47444968c3166d224f7d4aeede1 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 21:49:15 +0900 Subject: [PATCH 16/76] Change node state to enum from string (#93) --- dispatcher/grpc_service.go | 5 +- dispatcher/server_test.go | 12 +- indexer/grpc_service.go | 4 +- manager/grpc_service.go | 47 ++++---- manager/raft_fsm_test.go | 29 +++-- manager/server_test.go | 85 +++++++------ protobuf/management/management.pb.go | 173 ++++++++++++++++----------- protobuf/management/management.proto | 9 +- 8 files changed, 199 insertions(+), 165 deletions(-) diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index ca48e1f..ed5ca8d 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -29,7 +29,6 @@ import ( "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" - "github.com/hashicorp/raft" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" @@ -99,7 +98,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { continue } - if node.State == raft.Leader.String() || node.State == raft.Follower.String() { + if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { var ok bool client, ok = s.managerClients[id] if ok { @@ -108,7 +107,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) } } diff --git 
a/dispatcher/server_test.go b/dispatcher/server_test.go index ec4ff53..64a5ca3 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -49,7 +49,7 @@ func TestServer_Start(t *testing.T) { managerNode1 := &management.Node{ BindAddress: managerBindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress1, HttpAddress: managerHttpAddress1, @@ -85,7 +85,7 @@ func TestServer_Start(t *testing.T) { managerNode2 := &management.Node{ BindAddress: managerBindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress2, HttpAddress: managerHttpAddress2, @@ -121,7 +121,7 @@ func TestServer_Start(t *testing.T) { managerNode3 := &management.Node{ BindAddress: managerBindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress3, HttpAddress: managerHttpAddress3, @@ -167,7 +167,7 @@ func TestServer_Start(t *testing.T) { Nodes: map[string]*management.Node{ managerNodeId1: { BindAddress: managerBindAddress1, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress1, HttpAddress: managerHttpAddress1, @@ -175,7 +175,7 @@ func TestServer_Start(t *testing.T) { }, managerNodeId2: { BindAddress: managerBindAddress2, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress2, HttpAddress: managerHttpAddress2, @@ -183,7 +183,7 @@ func TestServer_Start(t *testing.T) { }, managerNodeId3: { BindAddress: managerBindAddress3, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: managerGrpcAddress3, HttpAddress: managerHttpAddress3, diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 88ccf16..5b8d5cb 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -107,7 
+107,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { continue } - if node.State == raft.Leader.String() || node.State == raft.Follower.String() { + if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { var ok bool client, ok = s.managerClients[id] if ok { @@ -116,7 +116,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { s.logger.Error("node does not exist", zap.String("id", id)) } } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State)) + s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) } } diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 2216077..6869c6c 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -79,35 +79,18 @@ func (s *GRPCService) Stop() error { } func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - var client *GRPCClient - for id, node := range s.cluster.Nodes { - state := node.State - if node.State == "" { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() { - var ok bool - client, ok = s.peerClients[id] - if ok { - break - } else { - s.logger.Error("node does not exist", zap.String("id", id)) + switch node.State { + case management.Node_LEADER: + if client, exist := s.peerClients[id]; exist { + return client, nil } - } else { - s.logger.Debug("not a leader", zap.String("id", id)) } } - if client == nil { - err := errors.New("there is no leader") - s.logger.Error(err.Error()) - return nil, err - } - - return client, nil + err := errors.New("there is no leader") + s.logger.Error(err.Error()) + return nil, err } func (s *GRPCService) cloneCluster(cluster *management.Cluster) (*management.Cluster, error) { @@ -319,7 +302,19 @@ func (s *GRPCService) NodeID() string { func (s *GRPCService) getSelfNode() *management.Node { node := 
s.raftServer.node - node.State = s.raftServer.State().String() + + switch s.raftServer.State() { + case raft.Follower: + node.State = management.Node_FOLLOWER + case raft.Candidate: + node.State = management.Node_CANDIDATE + case raft.Leader: + node.State = management.Node_LEADER + case raft.Shutdown: + node.State = management.Node_SHUTDOWN + default: + node.State = management.Node_UNKNOWN + } return node } @@ -336,7 +331,7 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { s.logger.Debug(err.Error(), zap.String("id", id)) return &management.Node{ BindAddress: "", - State: raft.Shutdown.String(), + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: "", HttpAddress: "", diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index 983ff68..99d814c 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -20,7 +20,6 @@ import ( "reflect" "testing" - "github.com/hashicorp/raft" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" ) @@ -58,7 +57,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -69,7 +68,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -80,7 +79,7 @@ func TestRaftFSM_GetNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -95,7 +94,7 @@ func TestRaftFSM_GetNode(t *testing.T) { exp1 := &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ 
GrpcAddress: "5110", HttpAddress: "8110", @@ -142,7 +141,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -153,7 +152,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -164,7 +163,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -178,7 +177,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } exp1 := &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -193,7 +192,7 @@ func TestRaftFSM_SetNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - State: raft.Shutdown.String(), + State: management.Node_SHUTDOWN, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -207,7 +206,7 @@ func TestRaftFSM_SetNode(t *testing.T) { } exp2 := &management.Node{ BindAddress: "2110", - State: raft.Shutdown.String(), + State: management.Node_SHUTDOWN, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -253,7 +252,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node1", &management.Node{ BindAddress: "2100", - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: "5100", HttpAddress: "8100", @@ -264,7 +263,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node2", &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: 
&management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", @@ -275,7 +274,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { "node3", &management.Node{ BindAddress: "2120", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5120", HttpAddress: "8120", @@ -289,7 +288,7 @@ func TestRaftFSM_DeleteNode(t *testing.T) { } exp1 := &management.Node{ BindAddress: "2110", - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: "5110", HttpAddress: "8110", diff --git a/manager/server_test.go b/manager/server_test.go index e389942..d1c30b8 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" @@ -47,7 +46,7 @@ func TestServer_Start(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -94,7 +93,7 @@ func TestServer_HealthCheck(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -188,7 +187,7 @@ func TestServer_GetNode(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -238,7 +237,7 @@ func TestServer_GetNode(t *testing.T) { } expNodeInfo := &management.Node{ BindAddress: bindAddress, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -267,7 +266,7 @@ func TestServer_GetCluster(t *testing.T) { node := 
&management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -319,7 +318,7 @@ func TestServer_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId: { BindAddress: bindAddress, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -350,7 +349,7 @@ func TestServer_SetState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -431,7 +430,7 @@ func TestServer_GetState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -512,7 +511,7 @@ func TestServer_DeleteState(t *testing.T) { node := &management.Node{ BindAddress: bindAddress, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress, HttpAddress: httpAddress, @@ -614,7 +613,7 @@ func TestCluster_Start(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -650,7 +649,7 @@ func TestCluster_Start(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -686,7 +685,7 @@ func TestCluster_Start(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -733,7 +732,7 @@ func TestCluster_HealthCheck(t *testing.T) { 
node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -769,7 +768,7 @@ func TestCluster_HealthCheck(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -805,7 +804,7 @@ func TestCluster_HealthCheck(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -975,7 +974,7 @@ func TestCluster_GetNode(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1011,7 +1010,7 @@ func TestCluster_GetNode(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1047,7 +1046,7 @@ func TestCluster_GetNode(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1106,7 +1105,7 @@ func TestCluster_GetNode(t *testing.T) { } expNode11 := &management.Node{ BindAddress: bindAddress1, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1123,7 +1122,7 @@ func TestCluster_GetNode(t *testing.T) { } expNode21 := &management.Node{ BindAddress: bindAddress2, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1140,7 
+1139,7 @@ func TestCluster_GetNode(t *testing.T) { } expNode31 := &management.Node{ BindAddress: bindAddress3, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1169,7 +1168,7 @@ func TestCluster_GetCluster(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1205,7 +1204,7 @@ func TestCluster_GetCluster(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1241,7 +1240,7 @@ func TestCluster_GetCluster(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1302,7 +1301,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1310,7 +1309,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1318,7 +1317,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1339,7 +1338,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - State: 
raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1347,7 +1346,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1355,7 +1354,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1376,7 +1375,7 @@ func TestCluster_GetCluster(t *testing.T) { Nodes: map[string]*management.Node{ nodeId1: { BindAddress: bindAddress1, - State: raft.Leader.String(), + State: management.Node_LEADER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1384,7 +1383,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId2: { BindAddress: bindAddress2, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1392,7 +1391,7 @@ func TestCluster_GetCluster(t *testing.T) { }, nodeId3: { BindAddress: bindAddress3, - State: raft.Follower.String(), + State: management.Node_FOLLOWER, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1423,7 +1422,7 @@ func TestCluster_SetState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1459,7 +1458,7 @@ func TestCluster_SetState(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1495,7 +1494,7 @@ 
func TestCluster_SetState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1670,7 +1669,7 @@ func TestCluster_GetState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1706,7 +1705,7 @@ func TestCluster_GetState(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1742,7 +1741,7 @@ func TestCluster_GetState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, @@ -1917,7 +1916,7 @@ func TestCluster_DeleteState(t *testing.T) { node1 := &management.Node{ BindAddress: bindAddress1, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress1, HttpAddress: httpAddress1, @@ -1953,7 +1952,7 @@ func TestCluster_DeleteState(t *testing.T) { node2 := &management.Node{ BindAddress: bindAddress2, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress2, HttpAddress: httpAddress2, @@ -1989,7 +1988,7 @@ func TestCluster_DeleteState(t *testing.T) { node3 := &management.Node{ BindAddress: bindAddress3, - State: "", + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddress3, HttpAddress: httpAddress3, diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index f5cf092..83c85b3 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -89,6 +89,40 @@ func 
(NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5e030ad796566078, []int{1, 0} } +type Node_State int32 + +const ( + Node_UNKNOWN Node_State = 0 + Node_FOLLOWER Node_State = 1 + Node_CANDIDATE Node_State = 2 + Node_LEADER Node_State = 3 + Node_SHUTDOWN Node_State = 4 +) + +var Node_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FOLLOWER", + 2: "CANDIDATE", + 3: "LEADER", + 4: "SHUTDOWN", +} + +var Node_State_value = map[string]int32{ + "UNKNOWN": 0, + "FOLLOWER": 1, + "CANDIDATE": 2, + "LEADER": 3, + "SHUTDOWN": 4, +} + +func (x Node_State) String() string { + return proto.EnumName(Node_State_name, int32(x)) +} + +func (Node_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{3, 0} +} + type ClusterWatchResponse_Event int32 const ( @@ -226,7 +260,6 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { return NodeHealthCheckResponse_HEALTHY } -// use for raft type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` @@ -275,12 +308,12 @@ func (m *Metadata) GetHttpAddress() string { } type Node struct { - BindAddress string `protobuf:"bytes,1,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + BindAddress string `protobuf:"bytes,1,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` + State Node_State `protobuf:"varint,2,opt,name=state,proto3,enum=management.Node_State" json:"state,omitempty"` + Metadata *Metadata 
`protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Node) Reset() { *m = Node{} } @@ -315,11 +348,11 @@ func (m *Node) GetBindAddress() string { return "" } -func (m *Node) GetState() string { +func (m *Node) GetState() Node_State { if m != nil { return m.State } - return "" + return Node_UNKNOWN } func (m *Node) GetMetadata() *Metadata { @@ -856,6 +889,7 @@ func (m *WatchResponse) GetValue() *any.Any { func init() { proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterEnum("management.Node_State", Node_State_name, Node_State_value) proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") @@ -882,65 +916,68 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 920 bytes of a gzipped FileDescriptorProto + // 965 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x7f, 0x6f, 0xda, 0x46, - 0x18, 0xc6, 0x36, 0x0e, 0xe4, 0x25, 0x69, 0xad, 0x6b, 0x94, 0x26, 0x6c, 0xea, 0x92, 0x5b, 0x57, - 0x65, 0xab, 0x0a, 0x15, 0x5b, 0xb5, 0x6c, 0xeb, 0x7e, 0xd0, 0x60, 0x25, 0x30, 0x4a, 0x22, 0x43, - 0x56, 0x75, 0x9a, 0x54, 0x1d, 0xf8, 0x0a, 0x28, 0xd8, 0xa6, 0xf8, 0x88, 0x94, 0xcf, 0xb0, 0x49, - 0xfb, 0x26, 0xfb, 0x77, 0x5f, 0x6b, 0x1f, 0x61, 0x3a, 0xdf, 0xd9, 0x18, 0xb0, 0x4d, 0xff, 0xe3, - 0xde, 0x7b, 0x9e, 0xe7, 0x9e, 0xf7, 0xbd, 0x7b, 0x5f, 0x03, 0x8f, 0xa7, 0x33, 0x8f, 
0x79, 0xfd, - 0xf9, 0xfb, 0xaa, 0x43, 0x5c, 0x32, 0xa4, 0x0e, 0x75, 0x59, 0xec, 0x67, 0x25, 0xd8, 0x46, 0xb0, - 0x88, 0x94, 0x0f, 0x87, 0x9e, 0x37, 0x9c, 0xd0, 0x6a, 0x44, 0x24, 0xee, 0x9d, 0x80, 0x95, 0x3f, - 0x59, 0xdd, 0xa2, 0xce, 0x94, 0xc9, 0x4d, 0xfc, 0xb7, 0x02, 0xfb, 0x1d, 0xcf, 0xa6, 0x17, 0x94, - 0x4c, 0xd8, 0xe8, 0x6c, 0x44, 0x07, 0x37, 0x16, 0xfd, 0x30, 0xa7, 0x3e, 0x43, 0x3f, 0x81, 0x3e, - 0x9d, 0x79, 0x7d, 0x7a, 0xa0, 0x1c, 0x29, 0x27, 0xf7, 0x6a, 0x27, 0x95, 0x98, 0x81, 0x64, 0x4a, - 0xe5, 0x8a, 0xe3, 0x2d, 0x41, 0xc3, 0x2f, 0x40, 0x0f, 0xd6, 0xe8, 0x3e, 0x94, 0x2e, 0xcc, 0x7a, - 0xbb, 0x77, 0xd1, 0xec, 0x98, 0xdd, 0xae, 0x91, 0x43, 0x3b, 0x50, 0x6c, 0x37, 0x7f, 0x33, 0x83, - 0x95, 0x82, 0x76, 0x61, 0xdb, 0x32, 0xeb, 0x0d, 0xb1, 0xa9, 0xe2, 0x7f, 0x14, 0x78, 0xb8, 0x26, - 0xef, 0x4f, 0x3d, 0xd7, 0xa7, 0xe8, 0x67, 0xd0, 0x7d, 0x46, 0x58, 0x68, 0xe9, 0xcb, 0x4c, 0x4b, - 0x82, 0x53, 0xe9, 0x72, 0x82, 0x25, 0x78, 0xd8, 0x02, 0x3d, 0x58, 0xa3, 0x12, 0x14, 0x84, 0xa7, - 0xb7, 0x46, 0x8e, 0x3b, 0xb8, 0xee, 0x84, 0x4b, 0x05, 0x6d, 0x83, 0x5e, 0xe7, 0xfe, 0x0c, 0x15, - 0x15, 0x21, 0xdf, 0x30, 0xeb, 0x0d, 0x43, 0xe3, 0x41, 0xee, 0xf2, 0xad, 0x91, 0xe7, 0xf0, 0xce, - 0x65, 0xef, 0x9d, 0x58, 0xea, 0xf8, 0x0a, 0x8a, 0xaf, 0x29, 0x23, 0x36, 0x61, 0x04, 0x1d, 0xc3, - 0xce, 0x70, 0x36, 0x1d, 0xbc, 0x23, 0xb6, 0x3d, 0xa3, 0xbe, 0x1f, 0xf8, 0xdc, 0xb6, 0x4a, 0x3c, - 0x56, 0x17, 0x21, 0x0e, 0x19, 0x31, 0x36, 0x8d, 0x20, 0xaa, 0x80, 0xf0, 0x98, 0x84, 0xe0, 0x0f, - 0x90, 0xe7, 0xd9, 0x70, 0x68, 0x7f, 0xec, 0xda, 0xab, 0x6a, 0x3c, 0x16, 0xaa, 0xed, 0x85, 0x15, - 0x11, 0x32, 0x62, 0x81, 0x9e, 0x43, 0xd1, 0x91, 0x96, 0x0e, 0xb4, 0x23, 0xe5, 0xa4, 0x54, 0xdb, - 0x8b, 0x97, 0x2a, 0xb4, 0x6b, 0x45, 0x28, 0xfc, 0xa7, 0x02, 0x85, 0xb3, 0xc9, 0xdc, 0x67, 0x74, - 0x86, 0xbe, 0x01, 0xdd, 0xf5, 0x6c, 0xca, 0xcf, 0xd3, 0x4e, 0x4a, 0xb5, 0x47, 0x71, 0xaa, 0xc4, - 0x04, 0xd5, 0xf6, 0x4d, 0x97, 0xcd, 0xee, 0x2c, 0x01, 0x2e, 0xb7, 0x00, 0x16, 0x41, 0x64, 0x80, - 0x76, 0x43, 0xef, 0xa4, 
0x63, 0xfe, 0x13, 0x3d, 0x01, 0xfd, 0x96, 0x4c, 0xe6, 0xc2, 0x69, 0xa9, - 0x66, 0xac, 0xde, 0x9d, 0x25, 0xb6, 0xbf, 0x57, 0x4f, 0x15, 0x7c, 0x0a, 0x06, 0x0f, 0x35, 0xdd, - 0xf7, 0x5e, 0x74, 0xf7, 0x8f, 0x21, 0xcf, 0x0f, 0x0a, 0x24, 0x93, 0xe8, 0xc1, 0x2e, 0x6e, 0x01, - 0x92, 0x16, 0x5b, 0xde, 0xd8, 0x0d, 0x9f, 0xf2, 0x3d, 0x50, 0xc7, 0xb6, 0x34, 0xa3, 0x8e, 0xed, - 0x48, 0x4b, 0xcd, 0xd4, 0xfa, 0x02, 0x1e, 0x48, 0xad, 0x36, 0x25, 0xb7, 0x34, 0x45, 0x0c, 0x37, - 0x22, 0xd8, 0x92, 0xdf, 0x67, 0x50, 0x18, 0x88, 0xb0, 0xb4, 0xfc, 0x20, 0xa1, 0x8e, 0x56, 0x88, - 0xc1, 0xff, 0x29, 0xb0, 0x27, 0x83, 0x6f, 0x08, 0x1b, 0x8c, 0x22, 0x9d, 0x97, 0xa0, 0xd3, 0x5b, - 0xea, 0x32, 0xf9, 0xe6, 0x9f, 0x24, 0xa8, 0x2c, 0x11, 0x2a, 0x26, 0x47, 0x5b, 0x82, 0x24, 0xcd, - 0xaa, 0x6b, 0x99, 0x6b, 0x59, 0x99, 0xc7, 0xbd, 0xe7, 0x3f, 0xc2, 0xfb, 0x0b, 0xd0, 0x83, 0x43, - 0x79, 0x57, 0x5d, 0x77, 0x7e, 0xed, 0x5c, 0xbe, 0xe9, 0x18, 0x39, 0xde, 0x3b, 0xad, 0xcb, 0x66, - 0x47, 0x34, 0x54, 0xdb, 0xac, 0x07, 0x0d, 0x05, 0xb0, 0x75, 0x7d, 0xd5, 0xa8, 0xf7, 0x4c, 0x43, - 0xc3, 0x8f, 0x00, 0xce, 0x29, 0x0b, 0xcb, 0xba, 0xf6, 0x62, 0xf0, 0x77, 0x50, 0x0a, 0xf6, 0x65, - 0x21, 0xbe, 0x0a, 0x1f, 0x90, 0x22, 0x5f, 0xb4, 0x98, 0x6b, 0x95, 0x70, 0xae, 0x55, 0xea, 0xee, - 0x9d, 0x7c, 0x44, 0xb8, 0x05, 0xd0, 0xcd, 0x90, 0x5e, 0x68, 0xa9, 0x9b, 0xb5, 0x8e, 0x61, 0xb7, - 0x41, 0x27, 0x94, 0xd1, 0x74, 0xa7, 0x47, 0xb0, 0x23, 0xef, 0x20, 0x0d, 0xf1, 0xaf, 0x02, 0xbb, - 0xcb, 0xf7, 0xfa, 0x03, 0x14, 0x06, 0x9e, 0xe3, 0x10, 0xd7, 0x96, 0x37, 0x7b, 0x1c, 0xaf, 0xf1, - 0xf2, 0x95, 0x9e, 0x09, 0xa0, 0x15, 0x32, 0xc2, 0x03, 0xd4, 0x84, 0x8c, 0xb4, 0xcd, 0x19, 0x3d, - 0x85, 0x82, 0x54, 0x5c, 0xbe, 0xb1, 0x02, 0x68, 0x5d, 0xb3, 0x67, 0x28, 0xfc, 0x96, 0x1a, 0x66, - 0xdb, 0xec, 0x99, 0x86, 0x5a, 0xfb, 0x6b, 0x0b, 0xe0, 0x75, 0x64, 0x0c, 0xfd, 0x01, 0xf7, 0x57, - 0x26, 0x2d, 0xc2, 0x9b, 0xbf, 0x0c, 0xe5, 0xcf, 0x3f, 0x62, 0x54, 0xe3, 0x1c, 0x7a, 0x05, 0xc5, - 0xb0, 0xf1, 0xd1, 0xfe, 0x5a, 0x0a, 0x26, 0xff, 0x70, 0x95, 
0x3f, 0x5d, 0x95, 0x8a, 0xb7, 0x1d, - 0xce, 0xa1, 0x73, 0x28, 0xc5, 0x46, 0x00, 0x4a, 0x1a, 0x5f, 0xb1, 0xd9, 0x50, 0x4e, 0x39, 0x06, - 0xe7, 0x50, 0x13, 0x76, 0xe2, 0xfd, 0x8f, 0x3e, 0x4b, 0x50, 0x8a, 0x4f, 0x86, 0x0c, 0xa9, 0x8b, - 0xc8, 0x53, 0x66, 0x6a, 0x49, 0x27, 0xac, 0x64, 0xd7, 0x8e, 0x4c, 0x05, 0x4f, 0x24, 0x55, 0xea, - 0x68, 0xd3, 0x9c, 0xc0, 0xb9, 0xe7, 0x0a, 0x3a, 0x05, 0xed, 0x9c, 0x32, 0xb4, 0x1f, 0x07, 0x2f, - 0x7a, 0xb2, 0xfc, 0x70, 0x2d, 0x1e, 0xf9, 0xf8, 0x16, 0xb4, 0xee, 0x2a, 0x73, 0xd1, 0x72, 0x19, - 0xa5, 0xf8, 0x11, 0xb6, 0x44, 0x3b, 0xa1, 0xc3, 0x38, 0x77, 0xa9, 0xc5, 0x32, 0xe8, 0xbf, 0x80, - 0x2e, 0x12, 0x3f, 0x48, 0x68, 0x17, 0x41, 0x3e, 0x4c, 0x6d, 0xa4, 0x20, 0xe7, 0x97, 0x50, 0xec, - 0xba, 0x64, 0xea, 0x8f, 0x3c, 0x96, 0x5a, 0xbd, 0xd4, 0xf3, 0x5f, 0x3d, 0xfb, 0xfd, 0xe9, 0x70, - 0xcc, 0x46, 0xf3, 0x7e, 0x65, 0xe0, 0x39, 0x55, 0xc7, 0xf3, 0xe7, 0x37, 0xa4, 0xda, 0x9f, 0x10, - 0x9f, 0x55, 0x13, 0xfe, 0xb4, 0xf5, 0xb7, 0x82, 0xe0, 0xd7, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, - 0xc2, 0xc8, 0x11, 0x5c, 0xd2, 0x09, 0x00, 0x00, + 0x18, 0xc6, 0x36, 0x0e, 0xe4, 0x25, 0x69, 0xad, 0x6b, 0x95, 0x26, 0x6c, 0xea, 0x92, 0x5b, 0x57, + 0x65, 0xeb, 0x0a, 0x15, 0x5b, 0xb5, 0x6c, 0xeb, 0x7e, 0xb8, 0xb1, 0x17, 0xa0, 0xd4, 0x44, 0x86, + 0x34, 0xea, 0x34, 0xa9, 0x32, 0xf8, 0x0a, 0x28, 0x60, 0x33, 0x7c, 0x44, 0xca, 0x67, 0xd8, 0xa4, + 0x7d, 0x93, 0xfd, 0xbb, 0x6f, 0xb2, 0xcf, 0xb1, 0x8f, 0x30, 0x9d, 0xef, 0x6c, 0x6c, 0x30, 0xa4, + 0xff, 0x71, 0xef, 0x3d, 0xcf, 0x73, 0xcf, 0xfb, 0xde, 0xbd, 0xaf, 0x81, 0x47, 0xd3, 0x99, 0x4f, + 0xfd, 0xde, 0xfc, 0x7d, 0x75, 0xe2, 0x78, 0xce, 0x80, 0x4c, 0x88, 0x47, 0x13, 0x3f, 0x2b, 0xe1, + 0x36, 0x82, 0x45, 0xa4, 0x7c, 0x30, 0xf0, 0xfd, 0xc1, 0x98, 0x54, 0x63, 0xa2, 0xe3, 0xdd, 0x70, + 0x58, 0xf9, 0xa3, 0xe5, 0x2d, 0x32, 0x99, 0x52, 0xb1, 0x89, 0xff, 0x92, 0x60, 0xcf, 0xf2, 0x5d, + 0x52, 0x27, 0xce, 0x98, 0x0e, 0x4f, 0x87, 0xa4, 0x7f, 0x65, 0x93, 0xdf, 0xe7, 0x24, 0xa0, 0xe8, + 0x47, 0x50, 0xa7, 0x33, 0xbf, 0x47, 0xf6, 0xa5, 
0x43, 0xe9, 0xf8, 0x4e, 0xed, 0xb8, 0x92, 0x30, + 0x90, 0x4d, 0xa9, 0x9c, 0x33, 0xbc, 0xcd, 0x69, 0xf8, 0x39, 0xa8, 0xe1, 0x1a, 0xdd, 0x85, 0x52, + 0xdd, 0xd4, 0x5b, 0xdd, 0x7a, 0xc3, 0x32, 0x3b, 0x1d, 0x2d, 0x87, 0x76, 0xa0, 0xd8, 0x6a, 0xbc, + 0x31, 0xc3, 0x95, 0x84, 0x76, 0x61, 0xdb, 0x36, 0x75, 0x83, 0x6f, 0xca, 0xf8, 0x6f, 0x09, 0x1e, + 0xac, 0xc8, 0x07, 0x53, 0xdf, 0x0b, 0x08, 0xfa, 0x09, 0xd4, 0x80, 0x3a, 0x34, 0xb2, 0xf4, 0xf9, + 0x46, 0x4b, 0x9c, 0x53, 0xe9, 0x30, 0x82, 0xcd, 0x79, 0xd8, 0x06, 0x35, 0x5c, 0xa3, 0x12, 0x14, + 0xb8, 0xa7, 0xb7, 0x5a, 0x8e, 0x39, 0xb8, 0xb0, 0xa2, 0xa5, 0x84, 0xb6, 0x41, 0xd5, 0x99, 0x3f, + 0x4d, 0x46, 0x45, 0xc8, 0x1b, 0xa6, 0x6e, 0x68, 0x0a, 0x0b, 0x32, 0x97, 0x6f, 0xb5, 0x3c, 0x83, + 0x5b, 0xed, 0xee, 0x3b, 0xbe, 0x54, 0xf1, 0x39, 0x14, 0x5f, 0x13, 0xea, 0xb8, 0x0e, 0x75, 0xd0, + 0x11, 0xec, 0x0c, 0x66, 0xd3, 0xfe, 0x3b, 0xc7, 0x75, 0x67, 0x24, 0x08, 0x42, 0x9f, 0xdb, 0x76, + 0x89, 0xc5, 0x74, 0x1e, 0x62, 0x90, 0x21, 0xa5, 0xd3, 0x18, 0x22, 0x73, 0x08, 0x8b, 0x09, 0x08, + 0xfe, 0x57, 0x82, 0x3c, 0x4b, 0x87, 0x61, 0x7b, 0x23, 0xcf, 0x5d, 0x96, 0x63, 0xb1, 0x48, 0xee, + 0xcb, 0xa8, 0x24, 0x72, 0x58, 0x92, 0xbd, 0xe5, 0x92, 0xa4, 0xf2, 0x47, 0xcf, 0xa0, 0x38, 0x11, + 0x5e, 0xf7, 0x95, 0x43, 0xe9, 0xb8, 0x54, 0xbb, 0x9f, 0x24, 0x44, 0x79, 0xd8, 0x31, 0x0a, 0xbf, + 0x4a, 0x54, 0xec, 0xc2, 0x7a, 0x65, 0xb5, 0x2f, 0x2d, 0x7e, 0x83, 0xbf, 0xb4, 0x5b, 0xad, 0xf6, + 0xa5, 0x69, 0xf3, 0x1b, 0x3c, 0xd5, 0x2d, 0xa3, 0x61, 0xe8, 0x5d, 0x56, 0x34, 0x80, 0xad, 0x96, + 0xa9, 0x1b, 0xa6, 0xad, 0x29, 0x0c, 0xd8, 0xa9, 0x5f, 0x74, 0x0d, 0x46, 0xcb, 0xe3, 0x3f, 0x24, + 0x28, 0x9c, 0x8e, 0xe7, 0x01, 0x25, 0x33, 0xf4, 0x35, 0xa8, 0x9e, 0xef, 0x12, 0x96, 0x94, 0x72, + 0x5c, 0xaa, 0x3d, 0x4c, 0xfa, 0x10, 0x98, 0x30, 0x81, 0xc0, 0xf4, 0xe8, 0xec, 0xc6, 0xe6, 0xe0, + 0x72, 0x13, 0x60, 0x11, 0x44, 0x1a, 0x28, 0x57, 0xe4, 0x46, 0x94, 0x85, 0xfd, 0x44, 0x8f, 0x41, + 0xbd, 0x76, 0xc6, 0x73, 0x5e, 0x8e, 0x52, 0x4d, 0x5b, 0x2e, 0x87, 0xcd, 0xb7, 0xbf, 
0x93, 0x4f, + 0x24, 0x7c, 0x02, 0x1a, 0x0b, 0x35, 0xbc, 0xf7, 0x7e, 0xfc, 0xc2, 0x1e, 0x41, 0x9e, 0x1d, 0x14, + 0x4a, 0x66, 0xd1, 0xc3, 0x5d, 0xdc, 0x04, 0x24, 0x2c, 0x36, 0xfd, 0x91, 0x17, 0x35, 0xcc, 0x1d, + 0x90, 0x47, 0xae, 0x30, 0x23, 0x8f, 0xdc, 0x58, 0x4b, 0xde, 0xa8, 0xf5, 0x19, 0xdc, 0x13, 0x5a, + 0x2d, 0xe2, 0x5c, 0x93, 0x35, 0x62, 0xd8, 0x88, 0x61, 0x29, 0xbf, 0x4f, 0xa1, 0xd0, 0xe7, 0x61, + 0x61, 0xf9, 0x5e, 0x46, 0x1d, 0xed, 0x08, 0x83, 0xff, 0x93, 0xe0, 0xbe, 0x08, 0x5e, 0x3a, 0xb4, + 0x3f, 0x8c, 0x75, 0x5e, 0x80, 0x4a, 0xae, 0x89, 0x47, 0x45, 0x67, 0x3d, 0xce, 0x50, 0x49, 0x11, + 0x2a, 0x26, 0x43, 0xdb, 0x9c, 0x24, 0xcc, 0xca, 0x2b, 0x99, 0x2b, 0x9b, 0x32, 0x4f, 0x7a, 0xcf, + 0x7f, 0x80, 0xf7, 0xe7, 0xa0, 0x86, 0x87, 0xa6, 0x5f, 0x62, 0x11, 0xf2, 0xcd, 0x76, 0xc3, 0xe2, + 0x6d, 0xdb, 0x32, 0xf5, 0x37, 0xe2, 0x05, 0x5e, 0x9c, 0x87, 0xaf, 0x51, 0xc1, 0x0f, 0x01, 0xce, + 0x08, 0x8d, 0xca, 0xba, 0xf2, 0x62, 0xf0, 0xb7, 0x50, 0x0a, 0xf7, 0x45, 0x21, 0xbe, 0x88, 0x1e, + 0x90, 0x24, 0xda, 0x83, 0x4f, 0xcf, 0x4a, 0x34, 0x3d, 0x2b, 0xba, 0x77, 0x23, 0x1e, 0x11, 0x6e, + 0x02, 0x74, 0x36, 0x48, 0x2f, 0xb4, 0xe4, 0xdb, 0xb5, 0x8e, 0x60, 0xd7, 0x20, 0x63, 0x42, 0xc9, + 0x7a, 0xa7, 0x87, 0xb0, 0x23, 0xee, 0x60, 0x1d, 0xe2, 0x1f, 0x09, 0x76, 0xd3, 0xf7, 0xfa, 0x3d, + 0x14, 0xfa, 0xfe, 0x64, 0xe2, 0x78, 0xae, 0xb8, 0xd9, 0xa3, 0x64, 0x8d, 0xd3, 0x57, 0x7a, 0xca, + 0x81, 0x76, 0xc4, 0x88, 0x0e, 0x90, 0x33, 0x32, 0x52, 0x6e, 0xcf, 0xe8, 0x09, 0x14, 0x84, 0x62, + 0xfa, 0xc6, 0x0a, 0xa0, 0x74, 0xcc, 0xae, 0x26, 0xb1, 0x5b, 0x32, 0xcc, 0x96, 0xc9, 0x66, 0x46, + 0xed, 0xcf, 0x2d, 0x80, 0xd7, 0xb1, 0x31, 0xf4, 0x1b, 0xdc, 0x5d, 0x9a, 0xe7, 0x08, 0xdf, 0xfe, + 0xfd, 0x29, 0x7f, 0xfa, 0x01, 0x1f, 0x04, 0x9c, 0x43, 0x2f, 0xa1, 0x18, 0x35, 0x3e, 0xda, 0x5b, + 0x49, 0xc1, 0x64, 0x9f, 0xc7, 0xf2, 0xc7, 0xcb, 0x52, 0xc9, 0xb6, 0xc3, 0x39, 0x74, 0x06, 0xa5, + 0xc4, 0x08, 0x40, 0x59, 0xe3, 0x2b, 0x31, 0x1b, 0xca, 0x6b, 0x8e, 0xc1, 0x39, 0xd4, 0x80, 0x9d, + 0x64, 0xff, 0xa3, 0x4f, 
0x32, 0x94, 0x92, 0x93, 0x61, 0x83, 0x54, 0x3d, 0xf6, 0xb4, 0x31, 0xb5, + 0xac, 0x13, 0x96, 0xb2, 0x6b, 0xc5, 0xa6, 0xc2, 0x27, 0xb2, 0x56, 0xea, 0xf0, 0xb6, 0x39, 0x81, + 0x73, 0xcf, 0x24, 0x74, 0x02, 0xca, 0x19, 0xa1, 0x28, 0xf5, 0x6d, 0x5a, 0xf4, 0x64, 0xf9, 0xc1, + 0x4a, 0x3c, 0xf6, 0xf1, 0x0d, 0x28, 0x9d, 0x65, 0xe6, 0xa2, 0xe5, 0x36, 0x94, 0xe2, 0x07, 0xd8, + 0xe2, 0xed, 0x84, 0x0e, 0x92, 0xdc, 0x54, 0x8b, 0x6d, 0xa0, 0xff, 0x0c, 0x2a, 0x4f, 0x7c, 0x3f, + 0xa3, 0x5d, 0x38, 0xf9, 0x60, 0x6d, 0x23, 0x85, 0x39, 0xbf, 0x80, 0x62, 0xc7, 0x73, 0xa6, 0xc1, + 0xd0, 0xa7, 0x6b, 0xab, 0xb7, 0xf6, 0xfc, 0x97, 0x4f, 0x7f, 0x7d, 0x32, 0x18, 0xd1, 0xe1, 0xbc, + 0x57, 0xe9, 0xfb, 0x93, 0xea, 0xc4, 0x0f, 0xe6, 0x57, 0x4e, 0xb5, 0x37, 0x76, 0x02, 0x5a, 0xcd, + 0xf8, 0x6b, 0xd8, 0xdb, 0x0a, 0x83, 0x5f, 0xfd, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xa1, 0xdf, + 0xcd, 0x38, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -956,7 +993,6 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ManagementClient interface { NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - // rpc NodeState (google.protobuf.Empty) returns (NodeStateResponse) {} NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) @@ -1125,7 +1161,6 @@ func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts . // ManagementServer is the server API for Management service. 
type ManagementServer interface { NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - // rpc NodeState (google.protobuf.Empty) returns (NodeStateResponse) {} NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 36cca52..2f0adf7 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -64,8 +64,15 @@ message Metadata { } message Node { + enum State { + UNKNOWN = 0; + FOLLOWER = 1; + CANDIDATE = 2; + LEADER = 3; + SHUTDOWN = 4; + } string bind_address = 1; - string state = 2; + State state = 2; Metadata metadata = 3; } From 1ebd478eeb7c3c38ffcc6b5c2978e2691b82e1f9 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 31 Jul 2019 21:50:11 +0900 Subject: [PATCH 17/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index f9fbeed..f527241 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Change protobuf #87 - Change the cluster watching method #90 - Change cluster watch command for manager #92 +- Change node state to enum from string #93 ## [v0.7.1] - 2019-07-18 From 22221f8aa5c6e2d2542e0cdeb56318ff88edea33 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 1 Aug 2019 12:40:44 +0900 Subject: [PATCH 18/76] Change node info structure (#94) --- cmd/blast/manager_cluster_watch.go | 1 - cmd/blast/manager_start.go | 4 +- dispatcher/server_test.go | 12 +- manager/grpc_client.go | 3 +- manager/grpc_service.go | 11 +- manager/raft_fsm.go | 12 +- manager/raft_fsm_test.go | 56 +++++----- manager/raft_server.go | 49 ++++---- manager/server.go | 8 +- manager/server_test.go | 98 +++++++++++----- protobuf/management/management.pb.go | 160 +++++++++++++-------------- protobuf/management/management.proto | 15 ++- 12 files changed, 233 insertions(+), 196 deletions(-) diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index bb5ae7c..775350b 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -46,7 +46,6 @@ func managerClusterWatch(c *cli.Context) error { } resp := &management.ClusterWatchResponse{ Event: 0, - Id: "", Node: nil, Cluster: cluster, } diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go index af0be52..ee2c93c 100644 --- a/cmd/blast/manager_start.go +++ b/cmd/blast/manager_start.go @@ -93,7 +93,9 @@ func managerStart(c *cli.Context) error { ) node := &management.Node{ + Id: nodeId, BindAddress: nodeAddr, + State: management.Node_UNKNOWN, Metadata: &management.Metadata{ GrpcAddress: grpcAddr, HttpAddress: httpAddr, @@ -120,7 +122,7 @@ func managerStart(c *cli.Context) error { IndexStorageType: indexStorageType, } - svr, err := manager.NewServer(peerGrpcAddr, nodeId, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) + svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexConfig, 
logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) if err != nil { return err } diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 64a5ca3..2aae862 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -48,6 +48,7 @@ func TestServer_Start(t *testing.T) { managerRaftStorageType1 := "boltdb" managerNode1 := &management.Node{ + Id: managerNodeId1, BindAddress: managerBindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -62,7 +63,7 @@ func TestServer_Start(t *testing.T) { } // create server - managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNodeId1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger) + managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer1 != nil { managerServer1.Stop() @@ -84,6 +85,7 @@ func TestServer_Start(t *testing.T) { managerRaftStorageType2 := "boltdb" managerNode2 := &management.Node{ + Id: managerNodeId2, BindAddress: managerBindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -98,7 +100,7 @@ func TestServer_Start(t *testing.T) { } // create server - managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNodeId2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger) + managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer2 != nil { managerServer2.Stop() @@ -120,6 +122,7 @@ func TestServer_Start(t *testing.T) { managerRaftStorageType3 := "boltdb" managerNode3 := &management.Node{ + Id: managerNodeId3, BindAddress: managerBindAddress3, State: 
management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -134,7 +137,7 @@ func TestServer_Start(t *testing.T) { } // create server - managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNodeId3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger) + managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer3 != nil { managerServer3.Stop() @@ -166,6 +169,7 @@ func TestServer_Start(t *testing.T) { expManagerCluster1 := &management.Cluster{ Nodes: map[string]*management.Node{ managerNodeId1: { + Id: managerNodeId1, BindAddress: managerBindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -174,6 +178,7 @@ func TestServer_Start(t *testing.T) { }, }, managerNodeId2: { + Id: managerNodeId2, BindAddress: managerBindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -182,6 +187,7 @@ func TestServer_Start(t *testing.T) { }, }, managerNodeId3: { + Id: managerNodeId3, BindAddress: managerBindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ diff --git a/manager/grpc_client.go b/manager/grpc_client.go index eff4dd6..af4c46e 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -120,9 +120,8 @@ func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*management.Node, error) return resp.Node, nil } -func (c *GRPCClient) ClusterJoin(id string, node *management.Node, opts ...grpc.CallOption) error { +func (c *GRPCClient) ClusterJoin(node *management.Node, opts ...grpc.CallOption) error { req := &management.ClusterJoinRequest{ - Id: id, Node: node, } diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 6869c6c..c763412 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -214,7 +214,6 @@ func (s *GRPCService) 
startUpdateCluster(checkInterval time.Duration) { // notify the cluster changes clusterResp := &management.ClusterWatchResponse{ Event: management.ClusterWatchResponse_UPDATE, - Id: id, Node: node, Cluster: snapshotCluster, } @@ -227,7 +226,6 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // notify the cluster changes clusterResp := &management.ClusterWatchResponse{ Event: management.ClusterWatchResponse_JOIN, - Id: id, Node: node, Cluster: snapshotCluster, } @@ -244,7 +242,6 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { // notify the cluster changes clusterResp := &management.ClusterWatchResponse{ Event: management.ClusterWatchResponse_LEAVE, - Id: id, Node: node, Cluster: snapshotCluster, } @@ -364,9 +361,9 @@ func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*manageme }, nil } -func (s *GRPCService) setNode(id string, node *management.Node) error { +func (s *GRPCService) setNode(node *management.Node) error { if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, node) + err := s.raftServer.SetNode(node) if err != nil { s.logger.Error(err.Error()) return err @@ -378,7 +375,7 @@ func (s *GRPCService) setNode(id string, node *management.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(id, node) + err = client.ClusterJoin(node) if err != nil { s.logger.Error(err.Error()) return err @@ -391,7 +388,7 @@ func (s *GRPCService) setNode(id string, node *management.Node) error { func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) { resp := &empty.Empty{} - err := s.setNode(req.Id, req.Node) + err := s.setNode(req.Node) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 75ceb1a..6eae3d5 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -60,7 +60,7 @@ func (f *RaftFSM) Stop() 
error { return nil } -func (f *RaftFSM) GetNodeConfig(nodeId string) (*management.Node, error) { +func (f *RaftFSM) GetNode(nodeId string) (*management.Node, error) { f.clusterMutex.RLock() defer f.clusterMutex.RUnlock() @@ -72,16 +72,16 @@ func (f *RaftFSM) GetNodeConfig(nodeId string) (*management.Node, error) { return node, nil } -func (f *RaftFSM) SetNodeConfig(nodeId string, node *management.Node) error { +func (f *RaftFSM) SetNode(node *management.Node) error { f.clusterMutex.RLock() defer f.clusterMutex.RUnlock() - f.cluster.Nodes[nodeId] = node + f.cluster.Nodes[node.Id] = node return nil } -func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { +func (f *RaftFSM) DeleteNode(nodeId string) error { f.clusterMutex.RLock() defer f.clusterMutex.RUnlock() @@ -183,7 +183,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNodeConfig(data["node_id"].(string), node) + err = f.SetNode(node) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} @@ -196,7 +196,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.DeleteNodeConfig(data["node_id"].(string)) + err = f.DeleteNode(data["id"].(string)) return &fsmResponse{error: err} case setKeyValue: var data map[string]interface{} diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index 99d814c..ca0ad48 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -53,9 +53,9 @@ func TestRaftFSM_GetNode(t *testing.T) { t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig( - "node1", + _ = fsm.SetNode( &management.Node{ + Id: "node1", BindAddress: "2100", State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -64,9 +64,9 @@ func TestRaftFSM_GetNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node2", + _ = fsm.SetNode( &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: 
&management.Metadata{ @@ -75,9 +75,9 @@ func TestRaftFSM_GetNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node3", + _ = fsm.SetNode( &management.Node{ + Id: "node3", BindAddress: "2120", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -87,12 +87,13 @@ func TestRaftFSM_GetNode(t *testing.T) { }, ) - val1, err := fsm.GetNodeConfig("node2") + val1, err := fsm.GetNode("node2") if err != nil { t.Fatalf("%v", err) } exp1 := &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -137,9 +138,9 @@ func TestRaftFSM_SetNode(t *testing.T) { t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig( - "node1", + _ = fsm.SetNode( &management.Node{ + Id: "node1", BindAddress: "2100", State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -148,9 +149,9 @@ func TestRaftFSM_SetNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node2", + _ = fsm.SetNode( &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -159,9 +160,9 @@ func TestRaftFSM_SetNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node3", + _ = fsm.SetNode( &management.Node{ + Id: "node3", BindAddress: "2120", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -171,11 +172,12 @@ func TestRaftFSM_SetNode(t *testing.T) { }, ) - val1, err := fsm.GetNodeConfig("node2") + val1, err := fsm.GetNode("node2") if err != nil { t.Fatalf("%v", err) } exp1 := &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -188,9 +190,9 @@ func TestRaftFSM_SetNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) } - _ = fsm.SetNodeConfig( - "node2", + _ = fsm.SetNode( &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_SHUTDOWN, Metadata: &management.Metadata{ @@ -200,11 +202,12 @@ func TestRaftFSM_SetNode(t *testing.T) { }, ) - val2, err 
:= fsm.GetNodeConfig("node2") + val2, err := fsm.GetNode("node2") if err != nil { t.Fatalf("%v", err) } exp2 := &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_SHUTDOWN, Metadata: &management.Metadata{ @@ -248,9 +251,9 @@ func TestRaftFSM_DeleteNode(t *testing.T) { t.Fatalf("%v", err) } - _ = fsm.SetNodeConfig( - "node1", + _ = fsm.SetNode( &management.Node{ + Id: "node1", BindAddress: "2100", State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -259,9 +262,9 @@ func TestRaftFSM_DeleteNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node2", + _ = fsm.SetNode( &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -270,9 +273,9 @@ func TestRaftFSM_DeleteNode(t *testing.T) { }, }, ) - _ = fsm.SetNodeConfig( - "node3", + _ = fsm.SetNode( &management.Node{ + Id: "node3", BindAddress: "2120", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -282,11 +285,12 @@ func TestRaftFSM_DeleteNode(t *testing.T) { }, ) - val1, err := fsm.GetNodeConfig("node2") + val1, err := fsm.GetNode("node2") if err != nil { t.Fatalf("%v", err) } exp1 := &management.Node{ + Id: "node2", BindAddress: "2110", State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -299,12 +303,12 @@ func TestRaftFSM_DeleteNode(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) } - err = fsm.DeleteNodeConfig("node2") + err = fsm.DeleteNode("node2") if err != nil { t.Fatalf("%v", err) } - val2, err := fsm.GetNodeConfig("node2") + val2, err := fsm.GetNode("node2") if err == nil { t.Fatalf("expected error: %v", err) } diff --git a/manager/raft_server.go b/manager/raft_server.go index e1eee46..257052f 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -36,7 +36,7 @@ import ( ) type RaftServer struct { - nodeId string + //nodeId string node *management.Node dataDir string raftStorageType string @@ -49,9 +49,9 @@ type RaftServer struct 
{ mu sync.RWMutex } -func NewRaftServer(nodeId string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - nodeId: nodeId, + //nodeId: nodeId, node: node, dataDir: dataDir, raftStorageType: raftStorageType, @@ -79,9 +79,9 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", zap.String("node_id", s.nodeId)) + s.logger.Info("create Raft config", zap.String("node_id", s.node.Id)) raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeId) + raftConfig.LocalID = raft.ServerID(s.node.Id) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard @@ -207,8 +207,8 @@ func (s *RaftServer) Start() error { } // set node config - s.logger.Info("register its own node config", zap.String("node_id", s.nodeId), zap.Any("node", s.node)) - err = s.setNode(s.nodeId, s.node) + s.logger.Info("register its own node config", zap.Any("node", s.node)) + err = s.setNode(s.node) if err != nil { s.logger.Fatal(err.Error()) return err @@ -292,7 +292,7 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } func (s *RaftServer) NodeID() string { - return s.nodeId + return s.node.Id } func (s *RaftServer) Stats() map[string]string { @@ -318,7 +318,7 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { } func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { - nodeConfig, err := s.fsm.GetNodeConfig(nodeId) + nodeConfig, err := s.fsm.GetNode(nodeId) if err != nil { s.logger.Debug(err.Error(), zap.String("id", nodeId)) return nil, err @@ -327,34 +327,33 @@ func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { return nodeConfig, nil } -func (s 
*RaftServer) setNode(nodeId string, node *management.Node) error { +func (s *RaftServer) setNode(node *management.Node) error { msg, err := newMessage( setNode, map[string]interface{}{ - "node_id": nodeId, - "node": node, + "node": node, }, ) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } @@ -365,7 +364,7 @@ func (s *RaftServer) deleteNode(nodeId string) error { msg, err := newMessage( deleteNode, map[string]interface{}{ - "node_id": nodeId, + "id": nodeId, }, ) if err != nil { @@ -417,7 +416,7 @@ func (s *RaftServer) GetNode(id string) (*management.Node, error) { return node, nil } -func (s *RaftServer) SetNode(nodeId string, node *management.Node) error { +func (s *RaftServer) SetNode(node *management.Node) error { if !s.IsLeader() { s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader @@ -431,8 +430,8 @@ func (s *RaftServer) SetNode(nodeId string, node *management.Node) error { } for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) + if server.ID == raft.ServerID(node.Id) { + s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) return nil } } @@ 
-444,18 +443,18 @@ func (s *RaftServer) SetNode(nodeId string, node *management.Node) error { } // add node to Raft cluster - s.logger.Info("join the node to the raft cluster", zap.String("id", nodeId), zap.Any("bind_address", node.BindAddress)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(node.BindAddress), 0, 0) + s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) + f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("bind_address", node.BindAddress)) + s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) return err } // set node config - err = s.setNode(nodeId, node) + err = s.setNode(node) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.Any("node", node)) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } diff --git a/manager/server.go b/manager/server.go index bb36b92..df4bba7 100644 --- a/manager/server.go +++ b/manager/server.go @@ -23,7 +23,6 @@ import ( type Server struct { peerGrpcAddr string - nodeId string node *management.Node dataDir string raftStorageType string @@ -39,10 +38,9 @@ type Server struct { httpServer *HTTPServer } -func NewServer(peerGrpcAddr string, nodeId string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(peerGrpcAddr string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ peerGrpcAddr: peerGrpcAddr, - nodeId: nodeId, node: node, dataDir: dataDir, raftStorageType: raftStorageType, @@ -61,7 +59,7 @@ func (s *Server) 
Start() { s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) // create raft server - s.raftServer, err = NewRaftServer(s.nodeId, s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -143,7 +141,7 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.nodeId, s.node) + err = client.ClusterJoin(s.node) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/manager/server_test.go b/manager/server_test.go index d1c30b8..1bf019b 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -45,6 +45,7 @@ func TestServer_Start(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -59,7 +60,7 @@ func TestServer_Start(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -92,6 +93,7 @@ func TestServer_HealthCheck(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -106,7 +108,7 @@ func TestServer_HealthCheck(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -186,6 +188,7 @@ func TestServer_GetNode(t *testing.T) { raftStorageType 
:= "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -200,7 +203,7 @@ func TestServer_GetNode(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -236,6 +239,7 @@ func TestServer_GetNode(t *testing.T) { t.Fatalf("%v", err) } expNodeInfo := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -265,6 +269,7 @@ func TestServer_GetCluster(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -279,7 +284,7 @@ func TestServer_GetCluster(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -317,6 +322,7 @@ func TestServer_GetCluster(t *testing.T) { expCluster := &management.Cluster{ Nodes: map[string]*management.Node{ nodeId: { + Id: nodeId, BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -348,6 +354,7 @@ func TestServer_SetState(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -362,7 +369,7 @@ func TestServer_SetState(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, 
indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -429,6 +436,7 @@ func TestServer_GetState(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -443,7 +451,7 @@ func TestServer_GetState(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -510,6 +518,7 @@ func TestServer_DeleteState(t *testing.T) { raftStorageType := "boltdb" node := &management.Node{ + Id: nodeId, BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -524,7 +533,7 @@ func TestServer_DeleteState(t *testing.T) { } // create server - server, err := NewServer(peerGrpcAddress, nodeId, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -612,6 +621,7 @@ func TestCluster_Start(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -626,7 +636,7 @@ func TestCluster_Start(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, 
grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -648,6 +658,7 @@ func TestCluster_Start(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -662,7 +673,7 @@ func TestCluster_Start(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -684,6 +695,7 @@ func TestCluster_Start(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -698,7 +710,7 @@ func TestCluster_Start(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -731,6 +743,7 @@ func TestCluster_HealthCheck(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -745,7 +758,7 @@ func TestCluster_HealthCheck(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -767,6 +780,7 @@ func 
TestCluster_HealthCheck(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -781,7 +795,7 @@ func TestCluster_HealthCheck(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -803,6 +817,7 @@ func TestCluster_HealthCheck(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -817,7 +832,7 @@ func TestCluster_HealthCheck(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -973,6 +988,7 @@ func TestCluster_GetNode(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -987,7 +1003,7 @@ func TestCluster_GetNode(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1009,6 +1025,7 @@ func TestCluster_GetNode(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + 
Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1023,7 +1040,7 @@ func TestCluster_GetNode(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1045,6 +1062,7 @@ func TestCluster_GetNode(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1059,7 +1077,7 @@ func TestCluster_GetNode(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1104,6 +1122,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("%v", err) } expNode11 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -1121,6 +1140,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("%v", err) } expNode21 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1138,6 +1158,7 @@ func TestCluster_GetNode(t *testing.T) { t.Fatalf("%v", err) } expNode31 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1167,6 +1188,7 @@ func TestCluster_GetCluster(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: 
management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1181,7 +1203,7 @@ func TestCluster_GetCluster(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1203,6 +1225,7 @@ func TestCluster_GetCluster(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1217,7 +1240,7 @@ func TestCluster_GetCluster(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1239,6 +1262,7 @@ func TestCluster_GetCluster(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1253,7 +1277,7 @@ func TestCluster_GetCluster(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1300,6 +1324,7 @@ func TestCluster_GetCluster(t *testing.T) { expCluster1 := &management.Cluster{ Nodes: map[string]*management.Node{ nodeId1: { + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ 
-1308,6 +1333,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId2: { + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1316,6 +1342,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId3: { + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1337,6 +1364,7 @@ func TestCluster_GetCluster(t *testing.T) { expCluster2 := &management.Cluster{ Nodes: map[string]*management.Node{ nodeId1: { + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -1345,6 +1373,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId2: { + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1353,6 +1382,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId3: { + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1374,6 +1404,7 @@ func TestCluster_GetCluster(t *testing.T) { expCluster3 := &management.Cluster{ Nodes: map[string]*management.Node{ nodeId1: { + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ @@ -1382,6 +1413,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId2: { + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1390,6 +1422,7 @@ func TestCluster_GetCluster(t *testing.T) { }, }, nodeId3: { + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ @@ -1421,6 +1454,7 @@ func TestCluster_SetState(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1435,7 +1469,7 @@ func TestCluster_SetState(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, 
nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1457,6 +1491,7 @@ func TestCluster_SetState(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1471,7 +1506,7 @@ func TestCluster_SetState(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1493,6 +1528,7 @@ func TestCluster_SetState(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1507,7 +1543,7 @@ func TestCluster_SetState(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1668,6 +1704,7 @@ func TestCluster_GetState(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1682,7 +1719,7 @@ func TestCluster_GetState(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + 
server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1704,6 +1741,7 @@ func TestCluster_GetState(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1718,7 +1756,7 @@ func TestCluster_GetState(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1740,6 +1778,7 @@ func TestCluster_GetState(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1754,7 +1793,7 @@ func TestCluster_GetState(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1915,6 +1954,7 @@ func TestCluster_DeleteState(t *testing.T) { raftStorageType1 := "boltdb" node1 := &management.Node{ + Id: nodeId1, BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1929,7 +1969,7 @@ func TestCluster_DeleteState(t *testing.T) { } // create server - server1, err := NewServer(peerGrpcAddress1, nodeId1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, 
indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1951,6 +1991,7 @@ func TestCluster_DeleteState(t *testing.T) { raftStorageType2 := "boltdb" node2 := &management.Node{ + Id: nodeId2, BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -1965,7 +2006,7 @@ func TestCluster_DeleteState(t *testing.T) { } // create server - server2, err := NewServer(peerGrpcAddress2, nodeId2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1987,6 +2028,7 @@ func TestCluster_DeleteState(t *testing.T) { raftStorageType3 := "boltdb" node3 := &management.Node{ + Id: nodeId3, BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ @@ -2001,7 +2043,7 @@ func TestCluster_DeleteState(t *testing.T) { } // create server - server3, err := NewServer(peerGrpcAddress3, nodeId3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 83c85b3..a2554fb 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -308,9 +308,10 @@ func (m *Metadata) GetHttpAddress() string { } type Node struct { - BindAddress string `protobuf:"bytes,1,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State Node_State `protobuf:"varint,2,opt,name=state,proto3,enum=management.Node_State" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + 
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + BindAddress string `protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` + State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=management.Node_State" json:"state,omitempty"` + Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -341,6 +342,13 @@ func (m *Node) XXX_DiscardUnknown() { var xxx_messageInfo_Node proto.InternalMessageInfo +func (m *Node) GetId() string { + if m != nil { + return m.Id + } + return "" +} + func (m *Node) GetBindAddress() string { if m != nil { return m.BindAddress @@ -441,8 +449,7 @@ func (m *NodeInfoResponse) GetNode() *Node { } type ClusterJoinRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -473,13 +480,6 @@ func (m *ClusterJoinRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo -func (m *ClusterJoinRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - func (m *ClusterJoinRequest) GetNode() *Node { if m != nil { return m.Node @@ -567,9 +567,8 @@ func (m *ClusterInfoResponse) GetCluster() *Cluster { type ClusterWatchResponse struct { Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.ClusterWatchResponse_Event" json:"event,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Node *Node `protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` - Cluster *Cluster `protobuf:"bytes,4,opt,name=cluster,proto3" 
json:"cluster,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -607,13 +606,6 @@ func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { return ClusterWatchResponse_UNKNOWN } -func (m *ClusterWatchResponse) GetId() string { - if m != nil { - return m.Id - } - return "" -} - func (m *ClusterWatchResponse) GetNode() *Node { if m != nil { return m.Node @@ -916,68 +908,68 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 965 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x7f, 0x6f, 0xda, 0x46, - 0x18, 0xc6, 0x36, 0x0e, 0xe4, 0x25, 0x69, 0xad, 0x6b, 0x95, 0x26, 0x6c, 0xea, 0x92, 0x5b, 0x57, - 0x65, 0xeb, 0x0a, 0x15, 0x5b, 0xb5, 0x6c, 0xeb, 0x7e, 0xb8, 0xb1, 0x17, 0xa0, 0xd4, 0x44, 0x86, - 0x34, 0xea, 0x34, 0xa9, 0x32, 0xf8, 0x0a, 0x28, 0x60, 0x33, 0x7c, 0x44, 0xca, 0x67, 0xd8, 0xa4, - 0x7d, 0x93, 0xfd, 0xbb, 0x6f, 0xb2, 0xcf, 0xb1, 0x8f, 0x30, 0x9d, 0xef, 0x6c, 0x6c, 0x30, 0xa4, - 0xff, 0x71, 0xef, 0x3d, 0xcf, 0x73, 0xcf, 0xfb, 0xde, 0xbd, 0xaf, 0x81, 0x47, 0xd3, 0x99, 0x4f, - 0xfd, 0xde, 0xfc, 0x7d, 0x75, 0xe2, 0x78, 0xce, 0x80, 0x4c, 0x88, 0x47, 0x13, 0x3f, 0x2b, 0xe1, - 0x36, 0x82, 0x45, 0xa4, 0x7c, 0x30, 0xf0, 0xfd, 0xc1, 0x98, 0x54, 0x63, 0xa2, 0xe3, 0xdd, 0x70, - 0x58, 0xf9, 0xa3, 0xe5, 0x2d, 0x32, 0x99, 0x52, 0xb1, 0x89, 0xff, 0x92, 0x60, 0xcf, 0xf2, 0x5d, - 0x52, 0x27, 0xce, 0x98, 0x0e, 0x4f, 0x87, 0xa4, 0x7f, 0x65, 0x93, 0xdf, 0xe7, 0x24, 0xa0, 0xe8, - 0x47, 0x50, 0xa7, 0x33, 0xbf, 0x47, 0xf6, 0xa5, 0x43, 0xe9, 0xf8, 0x4e, 0xed, 0xb8, 0x92, 0x30, - 0x90, 0x4d, 0xa9, 0x9c, 0x33, 0xbc, 0xcd, 0x69, 0xf8, 0x39, 0xa8, 0xe1, 0x1a, 0xdd, 0x85, 0x52, - 0xdd, 0xd4, 0x5b, 0xdd, 0x7a, 0xc3, 0x32, 0x3b, 0x1d, 0x2d, 0x87, 0x76, 0xa0, 0xd8, 
0x6a, 0xbc, - 0x31, 0xc3, 0x95, 0x84, 0x76, 0x61, 0xdb, 0x36, 0x75, 0x83, 0x6f, 0xca, 0xf8, 0x6f, 0x09, 0x1e, - 0xac, 0xc8, 0x07, 0x53, 0xdf, 0x0b, 0x08, 0xfa, 0x09, 0xd4, 0x80, 0x3a, 0x34, 0xb2, 0xf4, 0xf9, - 0x46, 0x4b, 0x9c, 0x53, 0xe9, 0x30, 0x82, 0xcd, 0x79, 0xd8, 0x06, 0x35, 0x5c, 0xa3, 0x12, 0x14, - 0xb8, 0xa7, 0xb7, 0x5a, 0x8e, 0x39, 0xb8, 0xb0, 0xa2, 0xa5, 0x84, 0xb6, 0x41, 0xd5, 0x99, 0x3f, - 0x4d, 0x46, 0x45, 0xc8, 0x1b, 0xa6, 0x6e, 0x68, 0x0a, 0x0b, 0x32, 0x97, 0x6f, 0xb5, 0x3c, 0x83, - 0x5b, 0xed, 0xee, 0x3b, 0xbe, 0x54, 0xf1, 0x39, 0x14, 0x5f, 0x13, 0xea, 0xb8, 0x0e, 0x75, 0xd0, - 0x11, 0xec, 0x0c, 0x66, 0xd3, 0xfe, 0x3b, 0xc7, 0x75, 0x67, 0x24, 0x08, 0x42, 0x9f, 0xdb, 0x76, - 0x89, 0xc5, 0x74, 0x1e, 0x62, 0x90, 0x21, 0xa5, 0xd3, 0x18, 0x22, 0x73, 0x08, 0x8b, 0x09, 0x08, - 0xfe, 0x57, 0x82, 0x3c, 0x4b, 0x87, 0x61, 0x7b, 0x23, 0xcf, 0x5d, 0x96, 0x63, 0xb1, 0x48, 0xee, - 0xcb, 0xa8, 0x24, 0x72, 0x58, 0x92, 0xbd, 0xe5, 0x92, 0xa4, 0xf2, 0x47, 0xcf, 0xa0, 0x38, 0x11, - 0x5e, 0xf7, 0x95, 0x43, 0xe9, 0xb8, 0x54, 0xbb, 0x9f, 0x24, 0x44, 0x79, 0xd8, 0x31, 0x0a, 0xbf, - 0x4a, 0x54, 0xec, 0xc2, 0x7a, 0x65, 0xb5, 0x2f, 0x2d, 0x7e, 0x83, 0xbf, 0xb4, 0x5b, 0xad, 0xf6, - 0xa5, 0x69, 0xf3, 0x1b, 0x3c, 0xd5, 0x2d, 0xa3, 0x61, 0xe8, 0x5d, 0x56, 0x34, 0x80, 0xad, 0x96, - 0xa9, 0x1b, 0xa6, 0xad, 0x29, 0x0c, 0xd8, 0xa9, 0x5f, 0x74, 0x0d, 0x46, 0xcb, 0xe3, 0x3f, 0x24, - 0x28, 0x9c, 0x8e, 0xe7, 0x01, 0x25, 0x33, 0xf4, 0x35, 0xa8, 0x9e, 0xef, 0x12, 0x96, 0x94, 0x72, - 0x5c, 0xaa, 0x3d, 0x4c, 0xfa, 0x10, 0x98, 0x30, 0x81, 0xc0, 0xf4, 0xe8, 0xec, 0xc6, 0xe6, 0xe0, - 0x72, 0x13, 0x60, 0x11, 0x44, 0x1a, 0x28, 0x57, 0xe4, 0x46, 0x94, 0x85, 0xfd, 0x44, 0x8f, 0x41, - 0xbd, 0x76, 0xc6, 0x73, 0x5e, 0x8e, 0x52, 0x4d, 0x5b, 0x2e, 0x87, 0xcd, 0xb7, 0xbf, 0x93, 0x4f, - 0x24, 0x7c, 0x02, 0x1a, 0x0b, 0x35, 0xbc, 0xf7, 0x7e, 0xfc, 0xc2, 0x1e, 0x41, 0x9e, 0x1d, 0x14, - 0x4a, 0x66, 0xd1, 0xc3, 0x5d, 0xdc, 0x04, 0x24, 0x2c, 0x36, 0xfd, 0x91, 0x17, 0x35, 0xcc, 0x1d, - 0x90, 0x47, 0xae, 0x30, 
0x23, 0x8f, 0xdc, 0x58, 0x4b, 0xde, 0xa8, 0xf5, 0x19, 0xdc, 0x13, 0x5a, - 0x2d, 0xe2, 0x5c, 0x93, 0x35, 0x62, 0xd8, 0x88, 0x61, 0x29, 0xbf, 0x4f, 0xa1, 0xd0, 0xe7, 0x61, - 0x61, 0xf9, 0x5e, 0x46, 0x1d, 0xed, 0x08, 0x83, 0xff, 0x93, 0xe0, 0xbe, 0x08, 0x5e, 0x3a, 0xb4, - 0x3f, 0x8c, 0x75, 0x5e, 0x80, 0x4a, 0xae, 0x89, 0x47, 0x45, 0x67, 0x3d, 0xce, 0x50, 0x49, 0x11, - 0x2a, 0x26, 0x43, 0xdb, 0x9c, 0x24, 0xcc, 0xca, 0x2b, 0x99, 0x2b, 0x9b, 0x32, 0x4f, 0x7a, 0xcf, - 0x7f, 0x80, 0xf7, 0xe7, 0xa0, 0x86, 0x87, 0xa6, 0x5f, 0x62, 0x11, 0xf2, 0xcd, 0x76, 0xc3, 0xe2, - 0x6d, 0xdb, 0x32, 0xf5, 0x37, 0xe2, 0x05, 0x5e, 0x9c, 0x87, 0xaf, 0x51, 0xc1, 0x0f, 0x01, 0xce, - 0x08, 0x8d, 0xca, 0xba, 0xf2, 0x62, 0xf0, 0xb7, 0x50, 0x0a, 0xf7, 0x45, 0x21, 0xbe, 0x88, 0x1e, - 0x90, 0x24, 0xda, 0x83, 0x4f, 0xcf, 0x4a, 0x34, 0x3d, 0x2b, 0xba, 0x77, 0x23, 0x1e, 0x11, 0x6e, - 0x02, 0x74, 0x36, 0x48, 0x2f, 0xb4, 0xe4, 0xdb, 0xb5, 0x8e, 0x60, 0xd7, 0x20, 0x63, 0x42, 0xc9, - 0x7a, 0xa7, 0x87, 0xb0, 0x23, 0xee, 0x60, 0x1d, 0xe2, 0x1f, 0x09, 0x76, 0xd3, 0xf7, 0xfa, 0x3d, - 0x14, 0xfa, 0xfe, 0x64, 0xe2, 0x78, 0xae, 0xb8, 0xd9, 0xa3, 0x64, 0x8d, 0xd3, 0x57, 0x7a, 0xca, - 0x81, 0x76, 0xc4, 0x88, 0x0e, 0x90, 0x33, 0x32, 0x52, 0x6e, 0xcf, 0xe8, 0x09, 0x14, 0x84, 0x62, - 0xfa, 0xc6, 0x0a, 0xa0, 0x74, 0xcc, 0xae, 0x26, 0xb1, 0x5b, 0x32, 0xcc, 0x96, 0xc9, 0x66, 0x46, - 0xed, 0xcf, 0x2d, 0x80, 0xd7, 0xb1, 0x31, 0xf4, 0x1b, 0xdc, 0x5d, 0x9a, 0xe7, 0x08, 0xdf, 0xfe, - 0xfd, 0x29, 0x7f, 0xfa, 0x01, 0x1f, 0x04, 0x9c, 0x43, 0x2f, 0xa1, 0x18, 0x35, 0x3e, 0xda, 0x5b, - 0x49, 0xc1, 0x64, 0x9f, 0xc7, 0xf2, 0xc7, 0xcb, 0x52, 0xc9, 0xb6, 0xc3, 0x39, 0x74, 0x06, 0xa5, - 0xc4, 0x08, 0x40, 0x59, 0xe3, 0x2b, 0x31, 0x1b, 0xca, 0x6b, 0x8e, 0xc1, 0x39, 0xd4, 0x80, 0x9d, - 0x64, 0xff, 0xa3, 0x4f, 0x32, 0x94, 0x92, 0x93, 0x61, 0x83, 0x54, 0x3d, 0xf6, 0xb4, 0x31, 0xb5, - 0xac, 0x13, 0x96, 0xb2, 0x6b, 0xc5, 0xa6, 0xc2, 0x27, 0xb2, 0x56, 0xea, 0xf0, 0xb6, 0x39, 0x81, - 0x73, 0xcf, 0x24, 0x74, 0x02, 0xca, 0x19, 0xa1, 0x28, 0xf5, 
0x6d, 0x5a, 0xf4, 0x64, 0xf9, 0xc1, - 0x4a, 0x3c, 0xf6, 0xf1, 0x0d, 0x28, 0x9d, 0x65, 0xe6, 0xa2, 0xe5, 0x36, 0x94, 0xe2, 0x07, 0xd8, - 0xe2, 0xed, 0x84, 0x0e, 0x92, 0xdc, 0x54, 0x8b, 0x6d, 0xa0, 0xff, 0x0c, 0x2a, 0x4f, 0x7c, 0x3f, - 0xa3, 0x5d, 0x38, 0xf9, 0x60, 0x6d, 0x23, 0x85, 0x39, 0xbf, 0x80, 0x62, 0xc7, 0x73, 0xa6, 0xc1, - 0xd0, 0xa7, 0x6b, 0xab, 0xb7, 0xf6, 0xfc, 0x97, 0x4f, 0x7f, 0x7d, 0x32, 0x18, 0xd1, 0xe1, 0xbc, - 0x57, 0xe9, 0xfb, 0x93, 0xea, 0xc4, 0x0f, 0xe6, 0x57, 0x4e, 0xb5, 0x37, 0x76, 0x02, 0x5a, 0xcd, - 0xf8, 0x6b, 0xd8, 0xdb, 0x0a, 0x83, 0x5f, 0xfd, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xa1, 0xdf, - 0xcd, 0x38, 0x0a, 0x00, 0x00, + // 963 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xed, 0x6e, 0xdb, 0x36, + 0x14, 0xb5, 0x2c, 0x2b, 0x76, 0xae, 0x93, 0x56, 0x60, 0x8b, 0x34, 0xf1, 0x86, 0x2e, 0xe1, 0xba, + 0x22, 0x5b, 0x57, 0xa7, 0xf0, 0x56, 0x2c, 0xeb, 0xba, 0x0f, 0x35, 0xd2, 0x62, 0xa7, 0xaa, 0x1c, + 0xc8, 0x4e, 0x83, 0x0e, 0x03, 0x0a, 0xd9, 0x62, 0x6d, 0x23, 0xb6, 0xe4, 0x59, 0x74, 0x80, 0x3c, + 0xc3, 0x06, 0xec, 0x4d, 0xf6, 0x77, 0xaf, 0xb3, 0x5f, 0x7b, 0x8e, 0x82, 0x22, 0x25, 0x4b, 0x8a, + 0xec, 0xe4, 0x9f, 0x79, 0x79, 0xce, 0xe5, 0xb9, 0x87, 0xf7, 0x52, 0x86, 0x47, 0xd3, 0x99, 0x4f, + 0xfd, 0xde, 0xfc, 0xc3, 0xc1, 0xc4, 0xf1, 0x9c, 0x01, 0x99, 0x10, 0x8f, 0x26, 0x7e, 0xd6, 0xc3, + 0x6d, 0x04, 0x8b, 0x48, 0x6d, 0x67, 0xe0, 0xfb, 0x83, 0x31, 0x39, 0x88, 0x89, 0x8e, 0x77, 0xc5, + 0x61, 0xb5, 0x4f, 0xb2, 0x5b, 0x64, 0x32, 0xa5, 0x62, 0x13, 0xff, 0x2d, 0xc1, 0x96, 0xe5, 0xbb, + 0xa4, 0x49, 0x9c, 0x31, 0x1d, 0x1e, 0x0d, 0x49, 0xff, 0xc2, 0x26, 0x7f, 0xcc, 0x49, 0x40, 0xd1, + 0x4f, 0xa0, 0x4c, 0x67, 0x7e, 0x8f, 0x6c, 0x4b, 0xbb, 0xd2, 0xfe, 0x9d, 0xc6, 0x7e, 0x3d, 0x21, + 0x20, 0x9f, 0x52, 0x3f, 0x65, 0x78, 0x9b, 0xd3, 0xf0, 0x73, 0x50, 0xc2, 0x35, 0xba, 0x0b, 0xd5, + 0xa6, 0xa1, 0x99, 0xdd, 0x66, 0xcb, 0x32, 0x3a, 0x1d, 0xb5, 0x80, 0x36, 0xa0, 0x62, 0xb6, 0xde, + 0x1a, 0xe1, 0x4a, 
0x42, 0x9b, 0xb0, 0x6e, 0x1b, 0x9a, 0xce, 0x37, 0x8b, 0xf8, 0x1f, 0x09, 0x1e, + 0x5c, 0x4b, 0x1f, 0x4c, 0x7d, 0x2f, 0x20, 0xe8, 0x67, 0x50, 0x02, 0xea, 0xd0, 0x48, 0xd2, 0x97, + 0x2b, 0x25, 0x71, 0x4e, 0xbd, 0xc3, 0x08, 0x36, 0xe7, 0x61, 0x1b, 0x94, 0x70, 0x8d, 0xaa, 0x50, + 0xe6, 0x9a, 0xde, 0xa9, 0x05, 0xa6, 0xe0, 0xcc, 0x8a, 0x96, 0x12, 0x5a, 0x07, 0x45, 0x63, 0xfa, + 0xd4, 0x22, 0xaa, 0x40, 0x49, 0x37, 0x34, 0x5d, 0x95, 0x59, 0x90, 0xa9, 0x7c, 0xa7, 0x96, 0x18, + 0xdc, 0x6a, 0x77, 0xdf, 0xf3, 0xa5, 0x82, 0x4f, 0xa1, 0xf2, 0x86, 0x50, 0xc7, 0x75, 0xa8, 0x83, + 0xf6, 0x60, 0x63, 0x30, 0x9b, 0xf6, 0xdf, 0x3b, 0xae, 0x3b, 0x23, 0x41, 0x10, 0xea, 0x5c, 0xb7, + 0xab, 0x2c, 0xa6, 0xf1, 0x10, 0x83, 0x0c, 0x29, 0x9d, 0xc6, 0x90, 0x22, 0x87, 0xb0, 0x98, 0x80, + 0xe0, 0xff, 0x25, 0x28, 0xb1, 0x72, 0xd0, 0x1d, 0x28, 0x8e, 0x5c, 0x91, 0xa4, 0x38, 0x72, 0x19, + 0xb7, 0x37, 0xf2, 0xdc, 0x2c, 0x97, 0xc5, 0xa2, 0xf4, 0x5f, 0x47, 0x16, 0xc9, 0xa1, 0x45, 0x5b, + 0x59, 0x8b, 0x52, 0x7e, 0xa0, 0x67, 0x50, 0x99, 0x08, 0xed, 0xdb, 0xa5, 0x5d, 0x69, 0xbf, 0xda, + 0xb8, 0x9f, 0x24, 0x44, 0x75, 0xd9, 0x31, 0x0a, 0xbf, 0x4e, 0x38, 0x78, 0x66, 0xbd, 0xb6, 0xda, + 0xe7, 0x16, 0xbf, 0xd1, 0x5f, 0xdb, 0xa6, 0xd9, 0x3e, 0x37, 0x6c, 0x7e, 0xa3, 0x47, 0x9a, 0xa5, + 0xb7, 0x74, 0xad, 0xcb, 0x4c, 0x04, 0x58, 0x33, 0x0d, 0x4d, 0x37, 0x6c, 0x55, 0x66, 0xc0, 0x4e, + 0xf3, 0xac, 0xab, 0x33, 0x5a, 0x09, 0xff, 0x29, 0x41, 0xf9, 0x68, 0x3c, 0x0f, 0x28, 0x99, 0xa1, + 0x6f, 0x41, 0xf1, 0x7c, 0x97, 0x30, 0xcf, 0xe4, 0xfd, 0x6a, 0xe3, 0x61, 0x52, 0x87, 0xc0, 0x84, + 0x05, 0x04, 0x86, 0x47, 0x67, 0x57, 0x36, 0x07, 0xd7, 0x4e, 0x00, 0x16, 0x41, 0xa4, 0x82, 0x7c, + 0x41, 0xae, 0x84, 0x61, 0xec, 0x27, 0x7a, 0x0c, 0xca, 0xa5, 0x33, 0x9e, 0x93, 0xd0, 0xaa, 0x6a, + 0x43, 0xcd, 0xda, 0x61, 0xf3, 0xed, 0x17, 0xc5, 0x43, 0x09, 0x1f, 0x82, 0xca, 0x42, 0x2d, 0xef, + 0x83, 0x1f, 0x77, 0xdc, 0x23, 0x28, 0xb1, 0x83, 0xc2, 0x94, 0x79, 0xf4, 0x70, 0x17, 0xbf, 0x00, + 0x24, 0x24, 0x9e, 0xf8, 0x23, 0x2f, 0x1a, 0xa0, 0xdb, 
0x71, 0xbf, 0x80, 0x7b, 0x82, 0x6b, 0x12, + 0xe7, 0x92, 0x44, 0xe4, 0xcc, 0xd5, 0x63, 0x3d, 0x86, 0xa5, 0xf4, 0x3d, 0x85, 0x72, 0x9f, 0x87, + 0xc5, 0x31, 0xf7, 0x72, 0x7c, 0xb3, 0x23, 0x0c, 0xfe, 0x4f, 0x82, 0xfb, 0x22, 0x78, 0xee, 0xd0, + 0xfe, 0x30, 0xce, 0xf3, 0x12, 0x14, 0x72, 0x49, 0x3c, 0x2a, 0x26, 0xeb, 0x71, 0x4e, 0x96, 0x14, + 0xa1, 0x6e, 0x30, 0xb4, 0xcd, 0x49, 0x71, 0xa5, 0xc5, 0x55, 0x95, 0x26, 0xb5, 0xca, 0xb7, 0xd0, + 0xfa, 0x1c, 0x94, 0xf0, 0x90, 0x74, 0xa7, 0x55, 0xa0, 0x74, 0xd2, 0x6e, 0x59, 0x7c, 0x4c, 0x4d, + 0x43, 0x7b, 0x2b, 0x3a, 0xec, 0xec, 0x34, 0xec, 0x36, 0x19, 0x3f, 0x04, 0x38, 0x26, 0x34, 0xb2, + 0xf1, 0x5a, 0x47, 0xe0, 0xef, 0xa1, 0x1a, 0xee, 0x8b, 0xc2, 0xbf, 0x8a, 0x1a, 0x44, 0x12, 0xed, + 0xcf, 0x5f, 0xcb, 0x7a, 0xf4, 0x5a, 0xd6, 0x35, 0xef, 0x4a, 0x34, 0x09, 0x3e, 0x01, 0xe8, 0xac, + 0x48, 0xbd, 0xc8, 0x55, 0xbc, 0x39, 0xd7, 0x1e, 0x6c, 0xea, 0x64, 0x4c, 0x28, 0x59, 0xae, 0x74, + 0x17, 0x36, 0x84, 0xe7, 0xcb, 0x10, 0xff, 0x4a, 0xb0, 0x99, 0xbe, 0xc7, 0x1f, 0xa0, 0xdc, 0xf7, + 0x27, 0x13, 0xc7, 0x73, 0xc5, 0x4d, 0xee, 0x25, 0x3d, 0x4e, 0x5f, 0xe1, 0x11, 0x07, 0xda, 0x11, + 0x23, 0x3a, 0xa0, 0x98, 0x53, 0x91, 0x7c, 0x73, 0x45, 0x4f, 0xa0, 0x2c, 0x32, 0xa6, 0x6f, 0xac, + 0x0c, 0x72, 0xc7, 0xe8, 0xaa, 0x12, 0xbb, 0x25, 0xdd, 0x30, 0x0d, 0xf6, 0x26, 0x34, 0xfe, 0x5a, + 0x03, 0x78, 0x13, 0x0b, 0x43, 0xbf, 0xc3, 0xdd, 0xcc, 0xfb, 0x8d, 0xf0, 0xcd, 0xdf, 0x9b, 0xda, + 0xe7, 0xb7, 0xf8, 0x00, 0xe0, 0x02, 0x7a, 0x05, 0x95, 0x68, 0xb0, 0xd1, 0xd6, 0xb5, 0x12, 0x0c, + 0xf6, 0x39, 0xac, 0x7d, 0x9a, 0x4d, 0x95, 0x1c, 0x33, 0x5c, 0x40, 0xc7, 0x50, 0x4d, 0x8c, 0x38, + 0xca, 0x7b, 0x9e, 0x12, 0xb3, 0x5f, 0x5b, 0x72, 0x0c, 0x2e, 0xa0, 0x16, 0x6c, 0x24, 0xe7, 0x1d, + 0x7d, 0x96, 0x93, 0x29, 0xf9, 0x12, 0xac, 0x48, 0xd5, 0x8c, 0x35, 0xad, 0x2c, 0x2d, 0xef, 0x84, + 0x4c, 0x75, 0x66, 0x2c, 0x2a, 0x6c, 0x91, 0xa5, 0xa9, 0x76, 0x6f, 0x7a, 0x17, 0x70, 0xe1, 0x99, + 0x84, 0x0e, 0x41, 0x3e, 0x26, 0x14, 0xa5, 0xbe, 0x3d, 0x8b, 0x99, 0xac, 0x3d, 0xb8, 0x16, 
0x8f, + 0x75, 0x7c, 0x07, 0x72, 0x27, 0xcb, 0x5c, 0x8c, 0xdc, 0x0a, 0x2b, 0x7e, 0x84, 0x35, 0x3e, 0x4e, + 0x68, 0x27, 0xc9, 0x4d, 0x8d, 0xd8, 0x0a, 0xfa, 0x2f, 0xa0, 0xf0, 0xc2, 0xb7, 0x73, 0xc6, 0x85, + 0x93, 0x77, 0x96, 0x0e, 0x52, 0x58, 0xf3, 0x4b, 0xa8, 0x74, 0x3c, 0x67, 0x1a, 0x0c, 0x7d, 0xba, + 0xd4, 0xbd, 0xa5, 0xe7, 0xbf, 0x7a, 0xfa, 0xdb, 0x93, 0xc1, 0x88, 0x0e, 0xe7, 0xbd, 0x7a, 0xdf, + 0x9f, 0x1c, 0x4c, 0xfc, 0x60, 0x7e, 0xe1, 0x1c, 0xf4, 0xc6, 0x4e, 0x40, 0x0f, 0x72, 0xfe, 0x0a, + 0xf6, 0xd6, 0xc2, 0xe0, 0x37, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xdd, 0x42, 0x6a, 0x28, + 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 2f0adf7..2a7d736 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -71,9 +71,10 @@ message Node { LEADER = 3; SHUTDOWN = 4; } - string bind_address = 1; - State state = 2; - Metadata metadata = 3; + string id = 1; + string bind_address = 2; + State state = 3; + Metadata metadata = 4; } message Cluster { @@ -85,8 +86,7 @@ message NodeInfoResponse { } message ClusterJoinRequest { - string id = 1; - Node node = 2; + Node node = 1; } message ClusterLeaveRequest { @@ -105,9 +105,8 @@ message ClusterWatchResponse { UPDATE = 3; } Event event = 1; - string id = 2; - Node node = 3; - Cluster cluster = 4; + Node node = 2; + Cluster cluster = 3; } message GetRequest { From e1ba6d22c2e1a278717847a3d950cd0a24de70e6 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 1 Aug 2019 12:41:11 +0900 Subject: [PATCH 19/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index f527241..7dd4954 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Change the cluster watching method #90 - Change cluster watch command for manager #92 - Change node state to enum from string #93 +- Change node info structure #94 ## [v0.7.1] - 2019-07-18 From 1a3dc9fcede13a719aefca056b8ce85fbe050d89 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 5 Aug 2019 00:30:05 +0900 Subject: [PATCH 20/76] Change protobuf for indexer and dispatcher (#95) --- README.md | 84 +- cmd/blast/dispatcher_node_health.go | 41 +- cmd/blast/indexer_cluster_info.go | 2 +- cmd/blast/indexer_cluster_leave.go | 5 +- cmd/blast/indexer_cluster_watch.go | 29 +- cmd/blast/indexer_node_health.go | 40 +- cmd/blast/indexer_node_info.go | 18 +- cmd/blast/indexer_start.go | 33 +- cmd/blast/main.go | 105 +- cmd/blast/manager_cluster_leave.go | 3 +- cmd/blast/manager_node_info.go | 6 +- cmd/blast/manager_watch.go | 2 +- dispatcher/grpc_client.go | 25 +- dispatcher/grpc_service.go | 425 ++++--- dispatcher/server_test.go | 378 +++--- indexer/grpc_client.go | 76 +- indexer/grpc_service.go | 666 ++++++----- indexer/index.go | 3 +- indexer/raft_fsm.go | 79 +- indexer/raft_server.go | 199 ++-- indexer/server.go | 104 +- indexer/server_test.go | 1599 ++++++++++++++------------ manager/grpc_client.go | 10 + manager/grpc_service.go | 15 +- manager/raft_fsm.go | 4 +- manager/raft_server.go | 7 +- manager/server_test.go | 85 +- protobuf/distribute/distribute.pb.go | 297 +++-- protobuf/distribute/distribute.proto | 29 +- protobuf/index/index.pb.go | 934 +++++++++------ protobuf/index/index.proto | 90 +- 31 files changed, 2912 insertions(+), 2481 deletions(-) diff --git a/README.md b/README.md index 9012660..e25d40d 100644 --- a/README.md +++ b/README.md @@ -273,15 +273,13 @@ You can see the result in JSON format. 
The result of the above command is: ```json { - "node_config": { - "bind_addr": ":2000", - "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5000", - "http_addr": ":8000", - "node_id": "indexer1", - "raft_storage_type": "boltdb" - }, - "state": "Leader" + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "http_address": ":8000" + } } ``` @@ -684,38 +682,34 @@ You can see the result in JSON format. The result of the above command is: ```json { - "indexer1": { - "node_config": { - "bind_addr": ":2000", - "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5000", - "http_addr": ":8000", - "node_id": "indexer1", - "raft_storage_type": "boltdb" - }, - "state": "Leader" - }, - "indexer2": { - "node_config": { - "bind_addr": ":2010", - "data_dir": "/tmp/blast/indexer2", - "grpc_addr": ":5010", - "http_addr": ":8010", - "node_id": "indexer2", - "raft_storage_type": "boltdb" + "nodes": { + "indexer1": { + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "http_address": ":8000" + } }, - "state": "Follower" - }, - "indexer3": { - "node_config": { - "bind_addr": ":2020", - "data_dir": "/tmp/blast/indexer3", - "grpc_addr": ":5020", - "http_addr": ":8020", - "node_id": "indexer3", - "raft_storage_type": "boltdb" + "indexer2": { + "id": "indexer2", + "bind_address": ":2010", + "state": 1, + "metadata": { + "grpc_address": ":5010", + "http_address": ":8010" + } }, - "state": "Follower" + "indexer3": { + "id": "indexer3", + "bind_address": ":2020", + "state": 1, + "metadata": { + "grpc_address": ":5020", + "http_address": ":8020" + } + } } } ``` @@ -786,9 +780,9 @@ Manager can also bring up a cluster like an indexer. 
Specify a common index mapp $ ./bin/blast manager start \ --grpc-address=:5100 \ --http-address=:8100 \ - --node-id=cluster1 \ + --node-id=manager1 \ --node-address=:2100 \ - --data-dir=/tmp/blast/cluster1 \ + --data-dir=/tmp/blast/manager1 \ --raft-storage-type=boltdb \ --index-mapping-file=./example/wiki_index_mapping.json \ --index-type=upside_down \ @@ -798,18 +792,18 @@ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5110 \ --http-address=:8110 \ - --node-id=cluster2 \ + --node-id=manager2 \ --node-address=:2110 \ - --data-dir=/tmp/blast/cluster2 \ + --data-dir=/tmp/blast/manager2 \ --raft-storage-type=boltdb $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5120 \ --http-address=:8120 \ - --node-id=cluster3 \ + --node-id=manager3 \ --node-address=:2120 \ - --data-dir=/tmp/blast/cluster3 \ + --data-dir=/tmp/blast/manager3 \ --raft-storage-type=boltdb ``` diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go index 698473e..5fb1b8f 100644 --- a/cmd/blast/dispatcher_node_health.go +++ b/cmd/blast/dispatcher_node_health.go @@ -18,12 +18,15 @@ import ( "fmt" "os" + "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) func dispatcherNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") + healthiness := c.Bool("healthiness") liveness := c.Bool("liveness") readiness := c.Bool("readiness") @@ -38,34 +41,30 @@ func dispatcherNodeHealth(c *cli.Context) error { } }() - if !liveness && !readiness { - LivenessState, err := client.LivenessProbe() + var state string + if healthiness { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { - return err + state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) - - readinessState, err := client.ReadinessProbe() + } else if liveness { + state, 
err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { - return err + state = distribute.NodeHealthCheckResponse_DEAD.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) - } else { - if liveness { - state, err := client.LivenessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else if readiness { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_READINESS.String()) + if err != nil { + state = distribute.NodeHealthCheckResponse_NOT_READY.String() } - if readiness { - state, err := client.ReadinessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + if err != nil { + state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() } } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + return nil } diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go index 3e8f1d8..434c011 100644 --- a/cmd/blast/indexer_cluster_info.go +++ b/cmd/blast/indexer_cluster_info.go @@ -37,7 +37,7 @@ func indexerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go index b0be2d9..e564256 100644 --- a/cmd/blast/indexer_cluster_leave.go +++ b/cmd/blast/indexer_cluster_leave.go @@ -33,10 +33,9 @@ func indexerClusterLeave(c *cli.Context) error { // get grpc address of leader node } - grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := indexer.NewGRPCClient(grpcAddr) + client, err := indexer.NewGRPCClient(peerGrpcAddr) if err != nil { return err } @@ -47,7 +46,7 @@ func indexerClusterLeave(c *cli.Context) error { } }() - err = client.DeleteNode(nodeId) + 
err = client.ClusterLeave(nodeId) if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go index 1a5097f..ba99bdb 100644 --- a/cmd/blast/indexer_cluster_watch.go +++ b/cmd/blast/indexer_cluster_watch.go @@ -16,14 +16,13 @@ package main import ( "encoding/json" - "errors" "fmt" "io" "log" "os" "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -41,12 +40,22 @@ func indexerClusterWatch(c *cli.Context) error { } }() - err = indexerClusterInfo(c) + cluster, err := client.ClusterInfo() if err != nil { return err } + resp := &index.ClusterWatchResponse{ + Event: 0, + Node: nil, + Cluster: cluster, + } + clusterBytes, err := json.MarshalIndent(resp, "", " ") + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) - watchClient, err := client.WatchCluster() + watchClient, err := client.ClusterWatch() if err != nil { return err } @@ -61,17 +70,7 @@ func indexerClusterWatch(c *cli.Context) error { break } - cluster, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - return err - } - if cluster == nil { - return errors.New("nil") - } - - var clusterBytes []byte - clusterMap := *cluster.(*map[string]interface{}) - clusterBytes, err = json.MarshalIndent(clusterMap, "", " ") + clusterBytes, err = json.MarshalIndent(resp, "", " ") if err != nil { return err } diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go index beab1c0..aedb6eb 100644 --- a/cmd/blast/indexer_node_health.go +++ b/cmd/blast/indexer_node_health.go @@ -19,11 +19,13 @@ import ( "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") + healthiness := c.Bool("healthiness") liveness := c.Bool("liveness") readiness := c.Bool("readiness") @@ -38,34 
+40,30 @@ func indexerNodeHealth(c *cli.Context) error { } }() - if !liveness && !readiness { - LivenessState, err := client.LivenessProbe() + var state string + if healthiness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { - return err + state = index.NodeHealthCheckResponse_UNHEALTHY.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) - - readinessState, err := client.ReadinessProbe() + } else if liveness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { - return err + state = index.NodeHealthCheckResponse_DEAD.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) - } else { - if liveness { - state, err := client.LivenessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else if readiness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + if err != nil { + state = index.NodeHealthCheckResponse_NOT_READY.String() } - if readiness { - state, err := client.ReadinessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + if err != nil { + state = index.NodeHealthCheckResponse_UNHEALTHY.String() } } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + return nil } diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go index ce35cd1..0ab3ad5 100644 --- a/cmd/blast/indexer_node_info.go +++ b/cmd/blast/indexer_node_info.go @@ -24,20 +24,8 @@ import ( ) func indexerNodeInfo(c *cli.Context) error { - clusterGrpcAddr := c.String("cluster-grpc-address") - shardId := c.String("shard-id") - peerGrpcAddr := c.String("peer-grpc-address") - - if clusterGrpcAddr != "" && shardId != "" { - - } else if peerGrpcAddr != "" { - - } - grpcAddr := 
c.String("grpc-address") - nodeId := c.Args().Get(0) - client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err @@ -49,17 +37,17 @@ func indexerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.GetNode(nodeId) + node, err := client.NodeInfo() if err != nil { return err } - metadataBytes, err := json.MarshalIndent(metadata, "", " ") + nodeBytes, err := json.MarshalIndent(node, "", " ") if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) return nil } diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go index a716efe..0afd811 100644 --- a/cmd/blast/indexer_start.go +++ b/cmd/blast/indexer_start.go @@ -24,11 +24,12 @@ import ( "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerStart(c *cli.Context) error { - clusterGRPCAddr := c.String("manager-grpc-address") + managerGRPCAddr := c.String("manager-grpc-address") shardId := c.String("shard-id") peerGRPCAddr := c.String("peer-grpc-address") @@ -93,26 +94,14 @@ func indexerStart(c *cli.Context) error { httpLogCompress, ) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if clusterGRPCAddr != "" { - clusterConfig.ManagerAddr = clusterGRPCAddr - } - if shardId != "" { - clusterConfig.ClusterId = shardId - } - if peerGRPCAddr != "" { - clusterConfig.PeerAddr = peerGRPCAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - NodeId: nodeId, - BindAddr: nodeAddr, - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - DataDir: dataDir, - RaftStorageType: raftStorageType, + node := &index.Node{ + Id: nodeId, + BindAddress: nodeAddr, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddr, + HttpAddress: httpAddr, + }, } var err error @@ -135,7 +124,7 @@ func 
indexerStart(c *cli.Context) error { IndexStorageType: indexStorageType, } - svr, err := indexer.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) + svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) if err != nil { return err } diff --git a/cmd/blast/main.go b/cmd/blast/main.go index 1997d3f..e889f32 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -557,43 +557,27 @@ func main() { Name: "info", Usage: "Get node information", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, Action: indexerNodeInfo, }, { - Name: "health", + Name: "healthcheck", Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, + cli.BoolFlag{ + Name: "healthiness", + Usage: "healthiness probe", + }, cli.BoolFlag{ Name: "liveness", Usage: "Liveness probe", @@ -615,29 +599,9 @@ func main() { Name: "info", Usage: "Get cluster information", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: 
"cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -647,29 +611,9 @@ func main() { Name: "watch", Usage: "Watch cluster", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -694,11 +638,6 @@ func main() { Value: "", Usage: "The gRPC address of the peer node that exists in the cluster to be joined", }, - cli.StringFlag{ - Name: "grpc-address", - Value: "", - Usage: "The gRPC listen address", - }, cli.StringFlag{ Name: 
"node-id", Value: "", @@ -715,7 +654,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -733,7 +672,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -755,7 +694,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -773,7 +712,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -791,7 +730,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, }, @@ -932,14 +871,18 @@ func main() { Usage: "Command for blast dispatcher node", Subcommands: []cli.Command{ { - Name: "health", + Name: "healthcheck", Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, + cli.BoolFlag{ + Name: "healthiness", + Usage: "healthiness probe", + }, cli.BoolFlag{ Name: "liveness", Usage: "Liveness probe", @@ -959,7 +902,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -977,7 +920,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -999,7 +942,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -1017,7 +960,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen 
address", }, cli.StringFlag{ diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go index b50a277..408f0ec 100644 --- a/cmd/blast/manager_cluster_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -29,10 +29,9 @@ func managerClusterLeave(c *cli.Context) error { // get grpc address of leader node } - grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := manager.NewGRPCClient(grpcAddr) + client, err := manager.NewGRPCClient(peerGrpcAddr) if err != nil { return err } diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index 55f0e1d..85314a2 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -37,17 +37,17 @@ func managerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.NodeInfo() + node, err := client.NodeInfo() if err != nil { return err } - metadataBytes, err := json.MarshalIndent(metadata, "", " ") + nodeBytes, err := json.MarshalIndent(node, "", " ") if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) return nil } diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go index 273927b..bab09af 100644 --- a/cmd/blast/manager_watch.go +++ b/cmd/blast/manager_watch.go @@ -70,7 +70,7 @@ func managerWatch(c *cli.Context) error { switch value.(type) { case *map[string]interface{}: valueMap := *value.(*map[string]interface{}) - valueBytes, err = json.MarshalIndent(valueMap, "", " ") + valueBytes, err = json.Marshal(valueMap) if err != nil { return err } diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go index a042b07..b1e4820 100644 --- a/dispatcher/grpc_client.go +++ b/dispatcher/grpc_client.go @@ -21,7 +21,6 @@ import ( "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" blasterrors "github.com/mosuka/blast/errors" 
"github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf" @@ -97,23 +96,25 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { + req := &distribute.NodeHealthCheckRequest{} - return distribute.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + switch probe { + case distribute.NodeHealthCheckRequest_HEALTHINESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS + case distribute.NodeHealthCheckRequest_LIVENESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_LIVENESS + case distribute.NodeHealthCheckRequest_READINESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_READINESS + default: + req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS } - return resp.State.String(), nil -} - -func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) 
if err != nil { st, _ := status.FromError(err) - return distribute.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + return distribute.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) } return resp.State.String(), nil diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index ed5ca8d..f51c94c 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -16,19 +16,20 @@ package dispatcher import ( "context" + "encoding/json" "errors" "hash/fnv" "io" "math/rand" - "reflect" "sort" "sync" "time" + "github.com/mosuka/blast/protobuf/index" + "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" @@ -42,34 +43,51 @@ import ( ) type GRPCService struct { - managerAddr string - logger *zap.Logger + managerGrpcAddress string + logger *zap.Logger managers *management.Cluster managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} - indexers map[string]interface{} + //indexers map[string]interface{} + indexers map[string]*index.Cluster indexerClients map[string]map[string]*indexer.GRPCClient updateIndexersStopCh chan struct{} updateIndexersDoneCh chan struct{} } -func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error) { +func NewGRPCService(managerGrpcAddress string, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ - managerAddr: managerAddr, - logger: logger, + managerGrpcAddress: managerGrpcAddress, + logger: logger, managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), - indexers: make(map[string]interface{}, 0), + //indexers: make(map[string]interface{}, 0), + indexers: make(map[string]*index.Cluster, 0), indexerClients: 
make(map[string]map[string]*indexer.GRPCClient, 0), }, nil } func (s *GRPCService) Start() error { + var err error + s.managers, err = s.getManagerCluster(s.managerGrpcAddress) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + for id, node := range s.managers.Nodes { + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) + } + s.managerClients[node.Id] = client + } + s.logger.Info("start to update manager cluster info") go s.startUpdateManagers(500 * time.Millisecond) @@ -80,12 +98,12 @@ func (s *GRPCService) Start() error { } func (s *GRPCService) Stop() error { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - s.logger.Info("stop to update indexer cluster info") s.stopUpdateIndexers() + s.logger.Info("stop to update manager cluster info") + s.stopUpdateManagers() + return nil } @@ -117,8 +135,8 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(s.managerAddr) +func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { + client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() if err != nil { @@ -140,6 +158,21 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluste return managers, nil } +func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + + var clone *management.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil +} + func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.updateManagersStopCh = make(chan struct{}) 
s.updateManagersDoneCh = make(chan struct{}) @@ -148,51 +181,20 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { close(s.updateManagersDoneCh) }() - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.managerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for nodeId, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue - } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - for { select { case <-s.updateManagersStopCh: s.logger.Info("received a request to stop updating a manager cluster") return default: + // get client for manager from the list client, err := s.getManagerClient() if err != nil { s.logger.Error(err.Error()) continue } - // create stream + // create stream for watching cluster changes stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) @@ -209,80 +211,77 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - managers := resp.Cluster - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, node := range managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue + s.logger.Info("cluster has 
changed", zap.Any("resp", resp)) + switch resp.Event { + case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: + // add to cluster nodes + s.managers.Nodes[resp.Node.Id] = resp.Node + + // check node state + switch resp.Node.State { + case management.Node_UNKNOWN, management.Node_SHUTDOWN: + // close client + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) + } + delete(s.managerClients, resp.Node.Id) } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER + if resp.Node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) continue } - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - - delete(s.managerClients, nodeId) - + // check client that already exist in the client list + if client, exist := s.managerClients[resp.Node.Id]; !exist { + // create new client + s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := 
manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue + } + s.managerClients[resp.Node.Id] = newClient + } else { + if client.GetAddress() != resp.Node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) err = client.Close() if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) } + delete(s.managerClients, resp.Node.Id) - newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + // re-create new client + s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue } - - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient + 
s.managerClients[resp.Node.Id] = newClient } } } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers.Nodes[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) + case management.ClusterWatchResponse_LEAVE: + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) } + delete(s.managerClients, resp.Node.Id) } - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) + if _, exist := s.managers.Nodes[resp.Node.Id]; exist { + delete(s.managers.Nodes, resp.Node.Id) + } + default: + s.logger.Debug("unknown event", zap.Any("event", resp.Event)) + continue } } } @@ -316,16 +315,6 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { close(s.updateIndexersDoneCh) }() - // wait for manager available - s.logger.Info("wait for manager clients are available") - for { - if len(s.managerClients) > 0 { - s.logger.Info("manager clients are available") - break - } - time.Sleep(100 * time.Millisecond) - } - // get active client for manager client, err := s.getManagerClient() if err != nil { @@ -333,57 +322,44 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } // get initial indexers - clusters, err := 
client.Get("/cluster_config/clusters/") + shards, err := client.Get("/cluster/shards") if err != nil { - s.logger.Error(err.Error()) + s.logger.Fatal(err.Error()) + return } - if clusters == nil { - s.logger.Error("nil") + if shards == nil { + s.logger.Error("/cluster/shards is nil") } - s.indexers = *clusters.(*map[string]interface{}) - - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) + for shardId, shardIntr := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shardIntr) + if err != nil { + s.logger.Error(err.Error()) continue } - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) + var shard *index.Cluster + err = json.Unmarshal(shardBytes, &shard) + if err != nil { + s.logger.Error(err.Error()) continue } - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - metadata, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", metadata)) - continue - } + s.indexers[shardId] = shard - grpcAddr, ok := metadata["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + for nodeId, node := range shard.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := indexer.NewGRPCClient(metadata["grpc_addr"].(string)) + newClient, err 
:= indexer.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) + if _, exist := s.indexerClients[shardId]; !exist { + s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) } - s.indexerClients[clusterId][nodeId] = client + s.indexerClients[shardId][nodeId] = newClient } } @@ -399,7 +375,7 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { continue } - stream, err := client.Watch("/cluster_config/clusters/") + stream, err := client.Watch("/cluster/shards/") if err != nil { s.logger.Error(err.Error()) continue @@ -414,95 +390,85 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - s.logger.Debug("data has changed", zap.String("key", resp.Key)) + s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - cluster, err := client.Get("/cluster_config/clusters/") + shardsIntr, err := client.Get("/cluster/shards/") if err != nil { s.logger.Error(err.Error()) continue } - if cluster == nil { - s.logger.Error("nil") + if shardsIntr == nil { + s.logger.Error("/cluster/shards is nil") continue } - indexers := *cluster.(*map[string]interface{}) - - // compare previous manager with current manager - if !reflect.DeepEqual(s.indexers, indexers) { - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) - continue - } + for shardId, shardIntr := range *shards.(*map[string]interface{}) { + shardBytes, 
err := json.Marshal(shardIntr) + if err != nil { + s.logger.Error(err.Error()) + continue + } - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) - continue - } + var shard *index.Cluster + err = json.Unmarshal(shardBytes, &shard) + if err != nil { + s.logger.Error(err.Error()) + continue + } - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } + s.indexers[shardId] = shard - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } + if _, exist := s.indexerClients[shardId]; !exist { + s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) + } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + // open clients for indexer nodes + for nodeId, node := range shard.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } + + // check client that already exist in the client list + if client, exist := s.indexerClients[shardId][node.Id]; !exist { + // create new client + newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - - client, exist := s.indexerClients[clusterId][nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), 
zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.indexerClients[clusterId], nodeId) - - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.indexerClients[clusterId][nodeId] = newClient - } + s.indexerClients[shardId][nodeId] = newClient + } else { + if client.GetAddress() != node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", node.Id)) } + delete(s.indexerClients[shardId], node.Id) - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := indexer.NewGRPCClient(nodeConfig["grpc_addr"].(string)) + // re-create new client + newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) - } - s.indexerClients[clusterId][nodeId] = newClient + s.indexerClients[shardId][nodeId] = newClient } } } + // close clients for non-existent indexer nodes + for id, client := range s.indexerClients[shardId] { + if _, exist := s.indexers[shardId].Nodes[id]; !exist { + 
s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + } + delete(s.indexerClients[shardId], id) + } + } } } } @@ -548,17 +514,16 @@ func (s *GRPCService) getIndexerClients() map[string]*indexer.GRPCClient { return indexerClients } -func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*distribute.LivenessProbeResponse, error) { - resp := &distribute.LivenessProbeResponse{ - State: distribute.LivenessProbeResponse_ALIVE, - } - - return resp, nil -} +func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeHealthCheckRequest) (*distribute.NodeHealthCheckResponse, error) { + resp := &distribute.NodeHealthCheckResponse{} -func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*distribute.ReadinessProbeResponse, error) { - resp := &distribute.ReadinessProbeResponse{ - State: distribute.ReadinessProbeResponse_READY, + switch req.Probe { + case distribute.NodeHealthCheckRequest_HEALTHINESS: + resp.State = distribute.NodeHealthCheckResponse_HEALTHY + case distribute.NodeHealthCheckRequest_LIVENESS: + resp.State = distribute.NodeHealthCheckResponse_ALIVE + case distribute.NodeHealthCheckRequest_READINESS: + resp.State = distribute.NodeHealthCheckResponse_READY } return resp, nil diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 2aae862..29d0577 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,11 +22,11 @@ import ( "testing" "time" - "github.com/hashicorp/raft" "github.com/mosuka/blast/config" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" @@ -205,69 +205,122 @@ 
func TestServer_Start(t *testing.T) { // // indexer cluster1 // - // create cluster config - indexerClusterConfig1 := config.DefaultClusterConfig() - indexerClusterConfig1.ManagerAddr = managerGrpcAddress1 - indexerClusterConfig1.ClusterId = "cluster1" - // create node config - indexerNodeConfig1 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress1 := managerGrpcAddress1 + indexerShardId1 := "shard-1" + indexerPeerGrpcAddress1 := "" + indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir1 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig1.DataDir) + _ = os.RemoveAll(indexerDataDir1) }() - indexer1, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig1, config.DefaultIndexConfig(), logger.Named("indexer1"), grpcLogger.Named("indexer1"), httpAccessLogger) + indexerRaftStorageType1 := "boltdb" + + indexerNode1 := &index.Node{ + Id: indexerNodeId1, + BindAddress: indexerBindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress1, + HttpAddress: indexerHttpAddress1, + }, + } + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer1 != nil { - indexer1.Stop() - } + indexerServer1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer1.Start() + indexerServer1.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig2 := testutils.TmpNodeConfig() + 
indexerManagerGrpcAddress2 := managerGrpcAddress1 + indexerShardId2 := "shard-1" + indexerPeerGrpcAddress2 := "" + indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir2 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig2.DataDir) + _ = os.RemoveAll(indexerDataDir2) }() - indexer2, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig2, config.DefaultIndexConfig(), logger.Named("indexer2"), grpcLogger.Named("indexer2"), httpAccessLogger) + indexerRaftStorageType2 := "boltdb" + + indexerNode2 := &index.Node{ + Id: indexerNodeId2, + BindAddress: indexerBindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress2, + HttpAddress: indexerHttpAddress2, + }, + } + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer2 != nil { - indexer2.Stop() - } + indexerServer2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer2.Start() + indexerServer2.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig3 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress3 := managerGrpcAddress1 + indexerShardId3 := "shard-1" + indexerPeerGrpcAddress3 := "" + indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress3 := 
fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir3 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig3.DataDir) + _ = os.RemoveAll(indexerDataDir3) }() - indexer3, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig3, config.DefaultIndexConfig(), logger.Named("indexer3"), grpcLogger.Named("indexer3"), httpAccessLogger) + indexerRaftStorageType3 := "boltdb" + + indexerNode3 := &index.Node{ + Id: indexerNodeId3, + BindAddress: indexerBindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress3, + HttpAddress: indexerHttpAddress3, + }, + } + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer3 != nil { - indexer3.Stop() - } + indexerServer3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer3.Start() + indexerServer3.Start() + // sleep time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient1, err := indexer.NewGRPCClient(indexerNodeConfig1.GRPCAddr) + indexerClient1, err := indexer.NewGRPCClient(indexerNode1.Metadata.GrpcAddress) defer func() { _ = indexerClient1.Close() }() @@ -275,119 +328,165 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster1, err := indexerClient1.GetCluster() + indexerCluster1, err := indexerClient1.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expIndexerCluster1 := map[string]interface{}{ - indexerNodeConfig1.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig2.NodeId: map[string]interface{}{ - 
"node_config": indexerNodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig3.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig3.ToMap(), - "state": raft.Follower.String(), + expIndexerCluster1 := &index.Cluster{ + Nodes: map[string]*index.Node{ + indexerNodeId1: { + Id: indexerNodeId1, + BindAddress: indexerBindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress1, + HttpAddress: indexerHttpAddress1, + }, + }, + indexerNodeId2: { + Id: indexerNodeId2, + BindAddress: indexerBindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress2, + HttpAddress: indexerHttpAddress2, + }, + }, + indexerNodeId3: { + Id: indexerNodeId3, + BindAddress: indexerBindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress3, + HttpAddress: indexerHttpAddress3, + }, + }, }, } actIndexerCluster1 := indexerCluster1 - expIndexerNodeConfig1 := expIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig1, actIndexerNodeConfig1) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig1, actIndexerNodeConfig1) - } - actIndexerState1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState1 && raft.Follower.String() != actIndexerState1 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState1) - } - expIndexerNodeConfig2 := expIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig2 := 
actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig2, actIndexerNodeConfig2) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig2, actIndexerNodeConfig2) - } - actIndexerState2 := actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState2 && raft.Follower.String() != actIndexerState2 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState2) - } - expIndexerNodeConfig3 := expIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig3, actIndexerNodeConfig3) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig3, actIndexerNodeConfig3) - } - actIndexerState3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState3 && raft.Follower.String() != actIndexerState3 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState3) + if !reflect.DeepEqual(expIndexerCluster1, actIndexerCluster1) { + t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) } // // indexer cluster2 // - // create cluster config - indexerClusterConfig2 := config.DefaultClusterConfig() - indexerClusterConfig2.ManagerAddr = managerGrpcAddress1 - indexerClusterConfig2.ClusterId = "cluster2" - // create node config - indexerNodeConfig4 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress4 := managerGrpcAddress1 + indexerShardId4 := "shard-2" + indexerPeerGrpcAddress4 := "" + indexerGrpcAddress4 := fmt.Sprintf(":%d", 
testutils.TmpPort()) + indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId4 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir4 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig4.DataDir) + _ = os.RemoveAll(indexerDataDir4) }() - indexer4, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig4, config.DefaultIndexConfig(), logger.Named("indexer4"), grpcLogger.Named("indexer4"), httpAccessLogger) + indexerRaftStorageType4 := "boltdb" + + indexerNode4 := &index.Node{ + Id: indexerNodeId4, + BindAddress: indexerBindAddress4, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress4, + HttpAddress: indexerHttpAddress4, + }, + } + indexConfig4, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexConfig4, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer4 != nil { - indexer4.Stop() - } + indexerServer4.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer4.Start() + indexerServer4.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig5 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress5 := managerGrpcAddress1 + indexerShardId5 := "shard-2" + indexerPeerGrpcAddress5 := "" + indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId5 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir5 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig5.DataDir) + _ = 
os.RemoveAll(indexerDataDir5) }() - indexer5, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig5, config.DefaultIndexConfig(), logger.Named("indexer5"), grpcLogger.Named("indexer5"), httpAccessLogger) + indexerRaftStorageType5 := "boltdb" + + indexerNode5 := &index.Node{ + Id: indexerNodeId5, + BindAddress: indexerBindAddress5, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress5, + HttpAddress: indexerHttpAddress5, + }, + } + indexConfig5, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexConfig5, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer5 != nil { - indexer5.Stop() - } + indexerServer5.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer5.Start() + indexerServer5.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig6 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress6 := managerGrpcAddress1 + indexerShardId6 := "shard-2" + indexerPeerGrpcAddress6 := "" + indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId6 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir6 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig6.DataDir) + _ = os.RemoveAll(indexerDataDir6) }() - indexer6, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig6, config.DefaultIndexConfig(), logger.Named("indexer6"), grpcLogger.Named("indexer6"), httpAccessLogger) + indexerRaftStorageType6 := "boltdb" + + indexerNode6 := &index.Node{ + Id: indexerNodeId6, + BindAddress: 
indexerBindAddress6, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress6, + HttpAddress: indexerHttpAddress6, + }, + } + indexConfig6, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexConfig6, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer6 != nil { - indexer6.Stop() - } + indexerServer6.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer6.Start() + indexerServer6.Start() + // sleep time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient2, err := indexer.NewGRPCClient(indexerNodeConfig4.GRPCAddr) + indexerClient2, err := indexer.NewGRPCClient(indexerNode4.Metadata.GrpcAddress) defer func() { _ = indexerClient1.Close() }() @@ -395,51 +494,44 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster2, err := indexerClient2.GetCluster() + indexerCluster2, err := indexerClient2.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expIndexerCluster2 := map[string]interface{}{ - indexerNodeConfig4.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig4.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig5.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig5.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig6.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig6.ToMap(), - "state": raft.Follower.String(), + expIndexerCluster2 := &index.Cluster{ + Nodes: map[string]*index.Node{ + indexerNodeId4: { + Id: indexerNodeId4, + BindAddress: indexerBindAddress4, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress4, + HttpAddress: 
indexerHttpAddress4, + }, + }, + indexerNodeId5: { + Id: indexerNodeId5, + BindAddress: indexerBindAddress5, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress5, + HttpAddress: indexerHttpAddress5, + }, + }, + indexerNodeId6: { + Id: indexerNodeId6, + BindAddress: indexerBindAddress6, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress6, + HttpAddress: indexerHttpAddress6, + }, + }, }, } actIndexerCluster2 := indexerCluster2 - expIndexerNodeConfig4 := expIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig4, actIndexerNodeConfig4) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig4, actIndexerNodeConfig4) - } - actIndexerState4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState4 && raft.Follower.String() != actIndexerState4 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState4) - } - expIndexerNodeConfig5 := expIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig5, actIndexerNodeConfig5) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig5, actIndexerNodeConfig5) - } - actIndexerState5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState5 && raft.Follower.String() != actIndexerState5 { - t.Fatalf("expected content to see %v or %v, saw %v", 
raft.Leader.String(), raft.Follower.String(), actIndexerState5) - } - expIndexerNodeConfig6 := expIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig6, actIndexerNodeConfig6) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig6, actIndexerNodeConfig6) - } - actIndexerState6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState6 && raft.Follower.String() != actIndexerState6 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState6) + if !reflect.DeepEqual(expIndexerCluster2, actIndexerCluster2) { + t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } // diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index e5cdbf6..e955e3b 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -97,64 +97,47 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { + req := &index.NodeHealthCheckRequest{} - return index.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + switch probe { + case index.NodeHealthCheckRequest_HEALTHINESS.String(): + req.Probe = index.NodeHealthCheckRequest_HEALTHINESS + case index.NodeHealthCheckRequest_LIVENESS.String(): + req.Probe = index.NodeHealthCheckRequest_LIVENESS + case index.NodeHealthCheckRequest_READINESS.String(): + req.Probe = index.NodeHealthCheckRequest_READINESS + default: 
+ req.Probe = index.NodeHealthCheckRequest_HEALTHINESS } - return resp.State.String(), nil -} - -func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - return index.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + return index.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) } return resp.State.String(), nil } -func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &index.GetNodeRequest{ - Id: id, - } - - resp, err := c.client.GetNode(c.ctx, req, opts...) +func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*index.Node, error) { + resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.NodeConfig) - nodeConfig := *ins.(*map[string]interface{}) - - node := map[string]interface{}{ - "node_config": nodeConfig, - "state": resp.State, - } - - return node, nil + return resp.Node, nil } -func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { - nodeConfigAny := &any.Any{} - err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) - if err != nil { - return err - } - - req := &index.SetNodeRequest{ - Id: id, - NodeConfig: nodeConfigAny, +func (c *GRPCClient) ClusterJoin(node *index.Node, opts ...grpc.CallOption) error { + req := &index.ClusterJoinRequest{ + Node: node, } - _, err = c.client.SetNode(c.ctx, req, opts...) + _, err := c.client.ClusterJoin(c.ctx, req, opts...) 
if err != nil { return err } @@ -162,12 +145,12 @@ func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts return nil } -func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { - req := &index.DeleteNodeRequest{ +func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { + req := &index.ClusterLeaveRequest{ Id: id, } - _, err := c.client.DeleteNode(c.ctx, req, opts...) + _, err := c.client.ClusterLeave(c.ctx, req, opts...) if err != nil { return err } @@ -175,24 +158,21 @@ func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { return nil } -func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) +func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*index.Cluster, error) { + resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.Cluster) - cluster := *ins.(*map[string]interface{}) - - return cluster, nil + return resp.Cluster, nil } -func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (index.Index_WatchClusterClient, error) { +func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { req := &empty.Empty{} - watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) + watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) 
if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 5b8d5cb..18007ff 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -16,18 +16,18 @@ package indexer import ( "context" + "encoding/json" "errors" "fmt" "io" - "reflect" "sync" "time" "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" @@ -40,16 +40,17 @@ import ( ) type GRPCService struct { - clusterConfig *config.ClusterConfig - raftServer *RaftServer - logger *zap.Logger + managerGrpcAddress string + shardId string + raftServer *RaftServer + logger *zap.Logger updateClusterStopCh chan struct{} updateClusterDoneCh chan struct{} - peers map[string]interface{} + peers *index.Cluster peerClients map[string]*GRPCClient - cluster map[string]interface{} - clusterChans map[chan index.GetClusterResponse]struct{} + cluster *index.Cluster + clusterChans map[chan index.ClusterWatchResponse]struct{} clusterMutex sync.RWMutex managers *management.Cluster @@ -58,16 +59,17 @@ type GRPCService struct { updateManagersDoneCh chan struct{} } -func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { +func NewGRPCService(managerGrpcAddress string, shardId string, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ - clusterConfig: clusterConfig, - raftServer: raftServer, - logger: logger, + managerGrpcAddress: managerGrpcAddress, + shardId: shardId, + raftServer: raftServer, + logger: logger, - peers: make(map[string]interface{}, 0), + peers: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, peerClients: make(map[string]*GRPCClient, 0), - 
cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan index.GetClusterResponse]struct{}), + cluster: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, + clusterChans: make(map[chan index.ClusterWatchResponse]struct{}), managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), @@ -75,14 +77,29 @@ func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, } func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) + if s.managerGrpcAddress != "" { + var err error + s.managers, err = s.getManagerCluster(s.managerGrpcAddress) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + for id, node := range s.managers.Nodes { + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) + } + s.managerClients[node.Id] = client + } - if s.clusterConfig.ManagerAddr != "" { s.logger.Info("start to update manager cluster info") go s.startUpdateManagers(500 * time.Millisecond) } + s.logger.Info("start to update cluster info") + go s.startUpdateCluster(500 * time.Millisecond) + return nil } @@ -90,7 +107,7 @@ func (s *GRPCService) Stop() error { s.logger.Info("stop to update cluster info") s.stopUpdateCluster() - if s.clusterConfig.ManagerAddr != "" { + if s.managerGrpcAddress != "" { s.logger.Info("stop to update manager cluster info") s.stopUpdateManagers() } @@ -126,7 +143,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { +func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() @@ -149,6 
+166,21 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluste return managers, nil } +func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + + var clone *management.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil +} + func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.updateManagersStopCh = make(chan struct{}) s.updateManagersDoneCh = make(chan struct{}) @@ -157,50 +189,20 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { close(s.updateManagersDoneCh) }() - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.clusterConfig.ManagerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for nodeId, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("id", nodeId)) - continue - } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - for { select { case <-s.updateManagersStopCh: s.logger.Info("received a request to stop updating a manager cluster") return default: + // get client for manager from the list client, err := s.getManagerClient() if err != nil { s.logger.Error(err.Error()) continue } + // create stream 
for watching cluster changes stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) @@ -217,80 +219,77 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - managers := resp.Cluster - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, nodeConfig := range managers.Nodes { - if nodeConfig.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue + s.logger.Info("cluster has changed", zap.Any("resp", resp)) + switch resp.Event { + case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: + // add to cluster nodes + s.managers.Nodes[resp.Node.Id] = resp.Node + + // check node state + switch resp.Node.State { + case management.Node_UNKNOWN, management.Node_SHUTDOWN: + // close client + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) + } + delete(s.managerClients, resp.Node.Id) } - - if nodeConfig.Metadata.GrpcAddress == "" { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER + if resp.Node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) continue } - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("id", nodeId)) - - if client.GetAddress() != nodeConfig.Metadata.GrpcAddress { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), 
zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - - delete(s.managerClients, nodeId) - + // check client that already exist in the client list + if client, exist := s.managerClients[resp.Node.Id]; !exist { + // create new client + s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue + } + s.managerClients[resp.Node.Id] = newClient + } else { + if client.GetAddress() != resp.Node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) err = client.Close() if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) } + delete(s.managerClients, resp.Node.Id) - newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) + // re-create new client + s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue } - - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", 
nodeConfig.Metadata.GrpcAddress)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient + s.managerClients[resp.Node.Id] = newClient } } } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers.Nodes[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) + case management.ClusterWatchResponse_LEAVE: + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) } + delete(s.managerClients, resp.Node.Id) } - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) + if _, exist := s.managers.Nodes[resp.Node.Id]; exist { + delete(s.managers.Nodes, resp.Node.Id) + } + default: + s.logger.Debug("unknown event", zap.Any("event", resp.Event)) + continue } } } @@ -317,34 +316,33 @@ func (s *GRPCService) 
stopUpdateManagers() { } func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - var client *GRPCClient - - for id, node := range s.cluster { - state, ok := node.(map[string]interface{})["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() { - client, ok = s.peerClients[id] - if ok { - break - } else { - s.logger.Error("node does not exist", zap.String("id", id)) + for id, node := range s.cluster.Nodes { + switch node.State { + case index.Node_LEADER: + if client, exist := s.peerClients[id]; exist { + return client, nil } - } else { - s.logger.Debug("not a leader", zap.String("id", id)) } } - if client == nil { - err := errors.New("there is no leader") - s.logger.Error(err.Error()) + err := errors.New("there is no leader") + s.logger.Error(err.Error()) + return nil, err +} + +func (s *GRPCService) cloneCluster(cluster *index.Cluster) (*index.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { return nil, err } - return client, nil + var clone *index.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil } func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { @@ -358,138 +356,168 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { ticker := time.NewTicker(checkInterval) defer ticker.Stop() + savedCluster, err := s.cloneCluster(s.cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + for { select { case <-s.updateClusterStopCh: s.logger.Info("received a request to stop updating a cluster") return case <-ticker.C: - cluster, err := s.getCluster() + s.cluster, err = s.getCluster() + if err != nil { + s.logger.Error(err.Error()) + return + } + + snapshotCluster, err := s.cloneCluster(s.cluster) if err != nil { s.logger.Error(err.Error()) return } // create peer node list with out self node - peers := make(map[string]interface{}, 
0) - for nodeId, node := range cluster { - if nodeId != s.NodeID() { - peers[nodeId] = node + for id, node := range snapshotCluster.Nodes { + if id != s.NodeID() { + s.peers.Nodes[id] = node } } - if !reflect.DeepEqual(s.peers, peers) { - // open clients - for nodeId, nodeInfo := range peers { - nodeConfig, ok := nodeInfo.(map[string]interface{})["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId), zap.Any("node_info", nodeInfo)) - continue - } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.peerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.peerClients, nodeId) - - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := NewGRPCClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.peerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) + // open clients for peer nodes + for id, node := range s.peers.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Debug("missing gRPC address", zap.String("id", id), 
zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := NewGRPCClient(grpcAddr) + client, exist := s.peerClients[id] + if exist { + if client.GetAddress() != node.Metadata.GrpcAddress { + s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + delete(s.peerClients, id) + err = client.Close() if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Warn(err.Error(), zap.String("id", id)) } - if peerClient != nil { - s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) - s.peerClients[nodeId] = peerClient + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } + s.peerClients[id] = newClient + } + } else { + s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } + s.peerClients[id] = newClient } + } - // close nonexistent clients - for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) + // close clients for non-existent peer nodes + for id, client := range s.peerClients { + if _, exist := s.peers.Nodes[id]; !exist { + s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), 
zap.String("grpc_addr", client.GetAddress())) + } + delete(s.peerClients, id) + } + } - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) + // check joined and updated nodes + for id, node := range snapshotCluster.Nodes { + nodeSnapshot, exist := savedCluster.Nodes[id] + if exist { + // node exists in the cluster + n1, err := json.Marshal(node) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) + continue + } + n2, err := json.Marshal(nodeSnapshot) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + continue + } + if !cmp.Equal(n1, n2) { + // node updated + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_UPDATE, + Node: node, + Cluster: snapshotCluster, } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.peerClients, nodeId) + for c := range s.clusterChans { + c <- *clusterResp + } + } + } else { + // node joined + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_JOIN, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp } } + } - // keep current peer nodes - s.logger.Debug("current peers", zap.Any("peers", peers)) - s.peers = peers - } else { - s.logger.Debug("there is no change in peers", zap.Any("peers", peers)) + // check left nodes + for id, node := range savedCluster.Nodes { + if _, exist := snapshotCluster.Nodes[id]; !exist { + // node left + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_LEAVE, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } + } } - 
// notify current cluster - if !reflect.DeepEqual(s.cluster, cluster) { - // convert to GetClusterResponse for channel output - clusterResp := &index.GetClusterResponse{} - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) + // set cluster state to manager + if !cmp.Equal(savedCluster, snapshotCluster) && s.managerGrpcAddress != "" && s.raftServer.IsLeader() { + snapshotClusterBytes, err := json.Marshal(snapshotCluster) if err != nil { - s.logger.Warn(err.Error()) + s.logger.Error(err.Error()) + continue } - clusterResp.Cluster = clusterAny - - // output to channel - for c := range s.clusterChans { - c <- *clusterResp + var snapshotClusterMap map[string]interface{} + err = json.Unmarshal(snapshotClusterBytes, &snapshotClusterMap) + if err != nil { + s.logger.Error(err.Error()) + continue } - // notify cluster config to manager - if s.clusterConfig.ManagerAddr != "" && s.raftServer.IsLeader() { - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - err = client.Set(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId), cluster) - if err != nil { - s.logger.Error(err.Error()) - } + client, err := s.getManagerClient() + if err != nil { + s.logger.Error(err.Error()) + continue + } + s.logger.Info("update shards", zap.Any("shards", snapshotClusterMap)) + err = client.Set(fmt.Sprintf("cluster/shards/%s", s.shardId), snapshotClusterMap) + if err != nil { + s.logger.Error(err.Error()) + continue } - - // keep current cluster - s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - s.cluster = cluster - } else { - s.logger.Debug("there is no change in cluster", zap.Any("cluster", cluster)) } + + savedCluster = snapshotCluster default: time.Sleep(100 * time.Millisecond) } @@ -516,17 +544,16 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("the cluster update has been stopped") } -func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) 
(*index.LivenessProbeResponse, error) { - resp := &index.LivenessProbeResponse{ - State: index.LivenessProbeResponse_ALIVE, - } +func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *index.NodeHealthCheckRequest) (*index.NodeHealthCheckResponse, error) { + resp := &index.NodeHealthCheckResponse{} - return resp, nil -} - -func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*index.ReadinessProbeResponse, error) { - resp := &index.ReadinessProbeResponse{ - State: index.ReadinessProbeResponse_READY, + switch req.Probe { + case index.NodeHealthCheckRequest_HEALTHINESS: + resp.State = index.NodeHealthCheckResponse_HEALTHY + case index.NodeHealthCheckRequest_LIVENESS: + resp.State = index.NodeHealthCheckResponse_ALIVE + case index.NodeHealthCheckRequest_READINESS: + resp.State = index.NodeHealthCheckResponse_READY } return resp, nil @@ -536,90 +563,73 @@ func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } -func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - return map[string]interface{}{ - "node_config": s.raftServer.nodeConfig.ToMap(), - "state": s.raftServer.State().String(), - }, nil -} - -func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error +func (s *GRPCService) getSelfNode() *index.Node { + node := s.raftServer.node - if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = peerClient.GetNode(id) - if err != nil { - s.logger.Warn(err.Error()) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - } else { - s.logger.Warn("node does not exist in peer list", zap.String("id", id)) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } + switch s.raftServer.State() { + case raft.Follower: + node.State = index.Node_FOLLOWER + case raft.Candidate: + node.State = 
index.Node_CANDIDATE + case raft.Leader: + node.State = index.Node_LEADER + case raft.Shutdown: + node.State = index.Node_SHUTDOWN + default: + node.State = index.Node_UNKNOWN } - return nodeInfo, nil + return node } -func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error - - if id == "" || id == s.NodeID() { - nodeInfo, err = s.getSelfNode() - } else { - nodeInfo, err = s.getPeerNode(id) +func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { + if _, exist := s.peerClients[id]; !exist { + err := errors.New("node does not exist in peers") + s.logger.Debug(err.Error(), zap.String("id", id)) + return nil, err } + node, err := s.peerClients[id].NodeInfo() if err != nil { - s.logger.Error(err.Error()) - return nil, err + s.logger.Debug(err.Error(), zap.String("id", id)) + return &index.Node{ + BindAddress: "", + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: "", + HttpAddress: "", + }, + }, nil } - return nodeInfo, nil + return node, nil } -func (s *GRPCService) GetNode(ctx context.Context, req *index.GetNodeRequest) (*index.GetNodeResponse, error) { - resp := &index.GetNodeResponse{} +func (s *GRPCService) getNode(id string) (*index.Node, error) { + if id == "" || id == s.NodeID() { + return s.getSelfNode(), nil + } else { + return s.getPeerNode(id) + } +} + +func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*index.NodeInfoResponse, error) { + resp := &index.NodeInfoResponse{} - nodeInfo, err := s.getNode(req.Id) + node, err := s.getNode(s.NodeID()) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - nodeConfigAny := &any.Any{} - if nodeConfig, exist := nodeInfo["node_config"]; exist { - err = protobuf.UnmarshalAny(nodeConfig.(map[string]interface{}), nodeConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - 
s.logger.Error("missing node_config", zap.Any("node_config", nodeConfig)) - } - - state, exist := nodeInfo["state"].(string) - if !exist { - s.logger.Error("missing node state", zap.String("state", state)) - state = raft.Shutdown.String() - } - - resp.NodeConfig = nodeConfigAny - resp.State = state - - return resp, nil + return &index.NodeInfoResponse{ + Node: node, + }, nil } -func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) error { +func (s *GRPCService) setNode(node *index.Node) error { if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, nodeConfig) + err := s.raftServer.SetNode(node) if err != nil { s.logger.Error(err.Error()) return err @@ -631,7 +641,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro s.logger.Error(err.Error()) return err } - err = client.SetNode(id, nodeConfig) + err = client.ClusterJoin(node) if err != nil { s.logger.Error(err.Error()) return err @@ -641,18 +651,10 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro return nil } -func (s *GRPCService) SetNode(ctx context.Context, req *index.SetNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterJoin(ctx context.Context, req *index.ClusterJoinRequest) (*empty.Empty, error) { resp := &empty.Empty{} - ins, err := protobuf.MarshalAny(req.NodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfig := *ins.(*map[string]interface{}) - - err = s.setNode(req.Id, nodeConfig) + err := s.setNode(req.Node) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -675,7 +677,7 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.DeleteNode(id) + err = client.ClusterLeave(id) if err != nil { s.logger.Error(err.Error()) return err @@ -685,7 +687,7 @@ func (s *GRPCService) deleteNode(id string) error { return nil 
} -func (s *GRPCService) DeleteNode(ctx context.Context, req *index.DeleteNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterLeave(ctx context.Context, req *index.ClusterLeaveRequest) (*empty.Empty, error) { resp := &empty.Empty{} err := s.deleteNode(req.Id) @@ -697,33 +699,28 @@ func (s *GRPCService) DeleteNode(ctx context.Context, req *index.DeleteNodeReque return resp, nil } -func (s *GRPCService) getCluster() (map[string]interface{}, error) { +func (s *GRPCService) getCluster() (*index.Cluster, error) { cluster, err := s.raftServer.GetCluster() if err != nil { s.logger.Error(err.Error()) return nil, err } - // update node state - for nodeId := range cluster { - node, err := s.getNode(nodeId) + // update latest node state + for id := range cluster.Nodes { + node, err := s.getNode(id) if err != nil { - s.logger.Error(err.Error()) - } - state := node["state"].(string) - - if _, ok := cluster[nodeId]; !ok { - cluster[nodeId] = map[string]interface{}{} + s.logger.Debug(err.Error()) + continue } - nodeInfo := cluster[nodeId].(map[string]interface{}) - nodeInfo["state"] = state + cluster.Nodes[id] = node } return cluster, nil } -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*index.GetClusterResponse, error) { - resp := &index.GetClusterResponse{} +func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*index.ClusterInfoResponse, error) { + resp := &index.ClusterInfoResponse{} cluster, err := s.getCluster() if err != nil { @@ -731,20 +728,13 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*index. 
return resp, status.Error(codes.Internal, err.Error()) } - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = clusterAny + resp.Cluster = cluster return resp, nil } -func (s *GRPCService) WatchCluster(req *empty.Empty, server index.Index_WatchClusterServer) error { - chans := make(chan index.GetClusterResponse) +func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterWatchServer) error { + chans := make(chan index.ClusterWatchResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} diff --git a/indexer/index.go b/indexer/index.go index f208a17..986c09b 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -19,14 +19,13 @@ import ( "os" "time" - "github.com/mosuka/blast/protobuf/index" - "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/document" "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 5be21e4..3d6bfc9 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -21,15 +21,13 @@ import ( "io/ioutil" "sync" - "github.com/mosuka/blast/protobuf/index" - "github.com/blevesearch/bleve" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) @@ -38,8 +36,8 @@ type RaftFSM struct { indexConfig *config.IndexConfig logger *zap.Logger - metadata maputils.Map - metadataMutex sync.RWMutex + cluster *index.Cluster + clusterMutex sync.RWMutex index *Index } @@ -53,10 +51,11 @@ func NewRaftFSM(path string, indexConfig *config.IndexConfig, logger 
*zap.Logger } func (f *RaftFSM) Start() error { - var err error - - f.metadata = maputils.Map{} + f.logger.Info("initialize cluster") + f.cluster = &index.Cluster{Nodes: make(map[string]*index.Node, 0)} + f.logger.Info("initialize index") + var err error f.index, err = NewIndex(f.path, f.indexConfig, f.logger) if err != nil { f.logger.Error(err.Error()) @@ -67,6 +66,7 @@ func (f *RaftFSM) Start() error { } func (f *RaftFSM) Stop() error { + f.logger.Info("close index") err := f.index.Close() if err != nil { f.logger.Error(err.Error()) @@ -76,45 +76,37 @@ func (f *RaftFSM) Stop() error { return nil } -func (f *RaftFSM) GetNodeConfig(nodeId string) (map[string]interface{}, error) { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) GetNode(nodeId string) (*index.Node, error) { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - nodeConfig, err := f.metadata.Get(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - if err == maputils.ErrNotFound { - return nil, blasterrors.ErrNotFound - } - return nil, err + node, ok := f.cluster.Nodes[nodeId] + if !ok { + return nil, blasterrors.ErrNotFound } - return nodeConfig.(maputils.Map).ToMap(), nil + return node, nil } -func (f *RaftFSM) SetNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) SetNode(node *index.Node) error { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Merge(nodeId, nodeConfig) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - return err - } + f.cluster.Nodes[node.Id] = node return nil } -func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) DeleteNode(nodeId string) error { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Delete(nodeId) 
- if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - return err + if _, ok := f.cluster.Nodes[nodeId]; !ok { + return blasterrors.ErrNotFound } + delete(f.cluster.Nodes, nodeId) + return nil } @@ -215,7 +207,22 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNodeConfig(data["node_id"].(string), data["node_config"].(map[string]interface{})) + b, err := json.Marshal(data["node"]) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + var node *index.Node + err = json.Unmarshal(b, &node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + err = f.SetNode(node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } return &fsmResponse{error: err} case deleteNode: var data map[string]interface{} @@ -224,7 +231,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.DeleteNodeConfig(data["node_id"].(string)) + err = f.DeleteNode(data["id"].(string)) return &fsmResponse{error: err} case indexDocument: var data []map[string]interface{} diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 5d8d1c8..c2fa628 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -31,33 +31,38 @@ import ( "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" ) type RaftServer struct { - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger + node *index.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + bootstrap bool + logger *zap.Logger raft *raft.Raft fsm *RaftFSM } -func NewRaftServer(nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, 
bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - nodeConfig: nodeConfig, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + bootstrap: bootstrap, + logger: logger, }, nil } func (s *RaftServer) Start() error { var err error - fsmPath := filepath.Join(s.nodeConfig.DataDir, "index") + fsmPath := filepath.Join(s.dataDir, "index") s.logger.Info("create finite state machine", zap.String("path", fsmPath)) s.fsm, err = NewRaftFSM(fsmPath, s.indexConfig, s.logger) if err != nil { @@ -72,27 +77,30 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", zap.String("node_id", s.nodeConfig.NodeId)) + s.logger.Info("create Raft config", zap.String("id", s.node.Id)) raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeConfig.NodeId) + raftConfig.LocalID = raft.ServerID(s.node.Id) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard + //if s.bootstrap { + // raftConfig.StartAsLeader = true + //} - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.nodeConfig.BindAddr)) - addr, err := net.ResolveTCPAddr("tcp", s.nodeConfig.BindAddr) + s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) + addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) if err != nil { s.logger.Fatal(err.Error()) return err } - s.logger.Info("create TCP transport", zap.String("bind_addr", s.nodeConfig.BindAddr)) - transport, err := raft.NewTCPTransport(s.nodeConfig.BindAddr, addr, 3, 10*time.Second, ioutil.Discard) + s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) + transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 
10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err } - snapshotPath := s.nodeConfig.DataDir + snapshotPath := s.dataDir s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) if err != nil { @@ -103,10 +111,10 @@ func (s *RaftServer) Start() error { s.logger.Info("create Raft machine") var logStore raft.LogStore var stableStore raft.StableStore - switch s.nodeConfig.RaftStorageType { + switch s.raftStorageType { case "boltdb": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -117,8 +125,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -126,8 +134,8 @@ func (s *RaftServer) Start() error { return err } case "badger": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), 
zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -138,8 +146,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -151,8 +159,8 @@ func (s *RaftServer) Start() error { return err } default: - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -163,8 +171,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + 
s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -200,11 +208,11 @@ func (s *RaftServer) Start() error { } // set node config - s.logger.Info("register its own information", zap.String("node_id", s.nodeConfig.NodeId), zap.Any("node_config", s.nodeConfig)) - err = s.setNodeConfig(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + s.logger.Info("register its own node config", zap.Any("node", s.node)) + err = s.setNode(s.node) if err != nil { s.logger.Fatal(err.Error()) - return nil + return err } } @@ -230,17 +238,6 @@ func (s *RaftServer) Stop() error { return nil } -func (s *RaftServer) raftServers() ([]raft.Server, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return cf.Configuration().Servers, nil -} - func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() @@ -270,13 +267,14 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", err } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err = cf.Error() if err != nil { s.logger.Error(err.Error()) return "", err } - for _, server := range servers { + for _, server := range cf.Configuration().Servers { if server.Address == leaderAddr { return server.ID, nil } @@ -287,7 +285,7 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } func (s *RaftServer) NodeID() string { - return s.nodeConfig.NodeId + return s.node.Id } func (s *RaftServer) Stats() map[string]string { @@ -312,99 +310,98 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { return nil } -func (s *RaftServer) getNodeConfig(nodeId string) 
(map[string]interface{}, error) { - nodeConfig, err := s.fsm.GetNodeConfig(nodeId) +func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { + nodeConfig, err := s.fsm.GetNode(nodeId) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", nodeId)) return nil, err } return nodeConfig, nil } -func (s *RaftServer) setNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) setNode(node *index.Node) error { msg, err := newMessage( setNode, map[string]interface{}{ - "node_id": nodeId, - "node_config": nodeConfig, + "node": node, }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } return nil } -func (s *RaftServer) deleteNodeConfig(nodeId string) error { +func (s *RaftServer) deleteNode(nodeId string) error { msg, err := newMessage( deleteNode, map[string]interface{}{ - "node_id": nodeId, + "id": nodeId, }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), 
zap.String("id", nodeId)) return err } return nil } -func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { - servers, err := s.raftServers() +func (s *RaftServer) GetNode(id string) (*index.Node, error) { + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return nil, err } - node := make(map[string]interface{}, 0) - for _, server := range servers { + var node *index.Node + for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - nodeConfig, err := s.getNodeConfig(id) + node, err = s.getNode(id) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", id)) return nil, err } - node["node_config"] = nodeConfig break } } @@ -412,44 +409,45 @@ func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { return node, nil } -func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) SetNode(node *index.Node) error { if !s.IsLeader() { s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return err } - for _, server := range servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) + for _, server := range cf.Configuration().Servers { + if server.ID == raft.ServerID(node.Id) { + s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) return nil } } - bindAddr, ok := nodeConfig["bind_addr"].(string) - if !ok { - s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) - return errors.New("missing metadata") + if node.BindAddress == "" { + err = errors.New("missing bind address") + s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) + return err } // add node to Raft 
cluster - s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.String("address", bindAddr)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(bindAddr), 0, 0) + s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) + f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) return err } // set node config - err = s.setNodeConfig(nodeId, nodeConfig) + err = s.setNode(node) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } @@ -458,54 +456,57 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e func (s *RaftServer) DeleteNode(nodeId string) error { if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } // delete node from Raft cluster - for _, server := range servers { + for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(nodeId) { - s.logger.Debug("remove server", zap.String("node_id", nodeId)) + s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) f := s.raft.RemoveServer(server.ID, 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", string(server.ID))) return err } } } // delete node config - err = s.deleteNodeConfig(nodeId) + err = s.deleteNode(nodeId) if err != nil { - s.logger.Error(err.Error()) + 
s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } return nil } -func (s *RaftServer) GetCluster() (map[string]interface{}, error) { - servers, err := s.raftServers() +func (s *RaftServer) GetCluster() (*index.Cluster, error) { + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return nil, err } - cluster := map[string]interface{}{} - for _, server := range servers { + cluster := &index.Cluster{Nodes: make(map[string]*index.Node, 0)} + for _, server := range cf.Configuration().Servers { node, err := s.GetNode(string(server.ID)) if err != nil { - s.logger.Warn(err.Error()) - node = map[string]interface{}{} + s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) + continue } - cluster[string(server.ID)] = node + + cluster.Nodes[string(server.ID)] = node } return cluster, nil diff --git a/indexer/server.go b/indexer/server.go index 8813a48..1ffa188 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -15,22 +15,28 @@ package indexer import ( + "encoding/json" "fmt" accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger + managerGrpcAddress string + shardId string + peerGrpcAddress string + node *index.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + logger *zap.Logger + grpcLogger *zap.Logger + httpLogger accesslog.Logger raftServer *RaftServer grpcService *GRPCService @@ -39,23 +45,27 @@ type Server struct { httpServer *HTTPServer } -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger 
accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, + managerGrpcAddress: managerGrpcAddress, + shardId: shardId, + peerGrpcAddress: peerGrpcAddress, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + logger: logger, + grpcLogger: grpcLogger, + httpLogger: httpLogger, }, nil } func (s *Server) Start() { // get peer from manager - if s.clusterConfig.ManagerAddr != "" { - s.logger.Info("connect to manager", zap.String("manager_addr", s.clusterConfig.ManagerAddr)) + if s.managerGrpcAddress != "" { + s.logger.Info("connect to manager", zap.String("manager_grpc_addr", s.managerGrpcAddress)) - mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) + mc, err := manager.NewGRPCClient(s.managerGrpcAddress) defer func() { s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -69,45 +79,41 @@ func (s *Server) Start() { return } - clusterIntr, err := mc.Get(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId)) + clusterIntr, err := mc.Get(fmt.Sprintf("cluster/shards/%s", s.shardId)) if err != nil && err != errors.ErrNotFound { s.logger.Fatal(err.Error()) return } if clusterIntr != nil { - cluster := *clusterIntr.(*map[string]interface{}) - for nodeId, nodeInfoIntr := range cluster { - if nodeId == s.nodeConfig.NodeId { - s.logger.Debug("skip own node id", zap.String("node_id", nodeId)) - continue - } - - nodeInfo := nodeInfoIntr.(map[string]interface{}) + b, err := json.Marshal(clusterIntr) + if err != nil { + s.logger.Fatal(err.Error()) + return + 
} - // get the peer node config - nodeConfig, ok := nodeInfo["node_config"].(map[string]interface{}) - if !ok { - s.logger.Error("missing node config", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } + var cluster *index.Cluster + err = json.Unmarshal(b, &cluster) + if err != nil { + s.logger.Fatal(err.Error()) + return + } - // get the peer node gRPC address - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Error("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + for id, node := range cluster.Nodes { + if id == s.node.Id { + s.logger.Debug("skip own node id", zap.String("id", id)) continue } - s.logger.Info("peer node detected", zap.String("peer_addr", grpcAddr)) - s.clusterConfig.PeerAddr = grpcAddr + s.logger.Info("peer node detected", zap.String("peer_grpc_addr", node.Metadata.GrpcAddress)) + s.peerGrpcAddress = node.Metadata.GrpcAddress break } } } //get index config from manager or peer - if s.clusterConfig.ManagerAddr != "" { - mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) + if s.managerGrpcAddress != "" { + mc, err := manager.NewGRPCClient(s.managerGrpcAddress) defer func() { s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -131,8 +137,8 @@ func (s *Server) Start() { if value != nil { s.indexConfig = config.NewIndexConfigFromMap(*value.(*map[string]interface{})) } - } else if s.clusterConfig.PeerAddr != "" { - pc, err := NewGRPCClient(s.clusterConfig.PeerAddr) + } else if s.peerGrpcAddress != "" { + pc, err := NewGRPCClient(s.peerGrpcAddress) defer func() { s.logger.Debug("close client", zap.String("address", pc.GetAddress())) err = pc.Close() @@ -159,41 +165,41 @@ func (s *Server) Start() { } // bootstrap node? 
- bootstrap := s.clusterConfig.PeerAddr == "" + bootstrap := s.peerGrpcAddress == "" s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) var err error // create raft server - s.raftServer, err = NewRaftServer(s.nodeConfig, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create gRPC service - s.grpcService, err = NewGRPCService(s.clusterConfig, s.raftServer, s.logger) + s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.shardId, s.raftServer, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create gRPC server - s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) + s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP server - s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -235,7 +241,7 @@ func (s *Server) Start() { // join to the existing cluster if !bootstrap { - client, err := NewGRPCClient(s.clusterConfig.PeerAddr) + client, err := NewGRPCClient(s.peerGrpcAddress) defer func() { err := client.Close() if err != nil { @@ -247,7 +253,7 @@ func (s *Server) Start() { return } - err = client.SetNode(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + err = client.ClusterJoin(s.node) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/indexer/server_test.go b/indexer/server_test.go index 58071bc..b527382 100644 --- 
a/indexer/server_test.go +++ b/indexer/server_test.go @@ -16,6 +16,7 @@ package indexer import ( "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" @@ -23,9 +24,9 @@ import ( "testing" "time" + "github.com/mosuka/blast/strutils" + "github.com/blevesearch/bleve" - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" @@ -36,31 +37,39 @@ import ( func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, 
raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -78,31 +87,39 @@ func TestServer_Start(t *testing.T) { func TestServer_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -117,7 +134,7 @@ func TestServer_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := 
NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -130,79 +147,34 @@ func TestServer_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness - liveness, err := client.LivenessProbe() + // healthiness + healthiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness := index.LivenessProbeResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } -} - -func TestServer_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) + expHealthiness := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness := healthiness + if expHealthiness != actHealthiness { + t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() + // liveness + liveness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness := liveness + if expLiveness != actLiveness { + t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness - readiness, err := client.ReadinessProbe() + readiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness := index.ReadinessProbeResponse_READY.String() + expReadiness := index.NodeHealthCheckResponse_READY.String() actReadiness := readiness if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) @@ -212,31 +184,39 @@ func TestServer_ReadinessProbe(t *testing.T) { func TestServer_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: 
grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -251,7 +231,7 @@ func TestServer_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -265,48 +245,61 @@ func TestServer_GetNode(t *testing.T) { } // get node - node, err := client.GetNode(nodeConfig.NodeId) + nodeInfo, err := client.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode := map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expNodeInfo := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, } - actNode := node - if !reflect.DeepEqual(expNode, actNode) { - t.Fatalf("expected content to see %v, saw %v", expNode, actNode) + actNodeInfo := nodeInfo + if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { + t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } } func TestServer_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := 
config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -321,7 +314,7 @@ func TestServer_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -335,14 +328,21 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expCluster := map[string]interface{}{ - nodeConfig.NodeId: map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expCluster := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId: { + Id: nodeId, + BindAddress: bindAddress, + State: 
index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, }, } actCluster := cluster @@ -354,31 +354,39 @@ func TestServer_GetCluster(t *testing.T) { func TestServer_GetIndexMapping(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -393,7 +401,7 @@ func TestServer_GetIndexMapping(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -429,31 +437,39 @@ func TestServer_GetIndexMapping(t *testing.T) { func TestServer_GetIndexType(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -468,7 +484,7 @@ func TestServer_GetIndexType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -501,31 +517,39 @@ func TestServer_GetIndexType(t *testing.T) { func TestServer_GetIndexStorageType(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -540,7 +564,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -573,31 +597,39 @@ func TestServer_GetIndexStorageType(t *testing.T) { func TestServer_GetIndexStats(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -612,7 +644,7 @@ func TestServer_GetIndexStats(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -654,31 +686,39 @@ func TestServer_GetIndexStats(t *testing.T) { func TestServer_PutDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -693,7 +733,7 @@ func TestServer_PutDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -747,31 +787,39 @@ func TestServer_PutDocument(t *testing.T) { func TestServer_GetDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -786,7 +834,7 @@ func TestServer_GetDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -855,31 +903,39 @@ func TestServer_GetDocument(t *testing.T) { func TestServer_DeleteDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -894,7 +950,7 @@ func TestServer_DeleteDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -992,31 +1048,39 @@ func TestServer_DeleteDocument(t *testing.T) { func TestServer_Search(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -1031,7 +1095,7 @@ func TestServer_Search(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -1117,78 +1181,125 @@ func TestServer_Search(t *testing.T) { func TestCluster_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer 
func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := 
NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep @@ -1198,99 +1309,146 @@ func TestCluster_Start(t *testing.T) { func TestCluster_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := 
fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", 
testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", 
"boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1298,171 +1456,100 @@ func TestCluster_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness check for server1 - liveness1, err := client1.LivenessProbe() + // healthiness + healthiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := index.LivenessProbeResponse_ALIVE.String() - actLiveness1 := liveness1 - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness1 := healthiness1 + if expHealthiness1 != actHealthiness1 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } - // liveness check for server2 - liveness2, err := client2.LivenessProbe() + // liveness + liveness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := 
index.LivenessProbeResponse_ALIVE.String() - actLiveness2 := liveness2 - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + expLiveness1 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness1 := liveness1 + if expLiveness1 != actLiveness1 { + t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } - // liveness check for server3 - liveness3, err := client3.LivenessProbe() + // readiness + readiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := index.LivenessProbeResponse_ALIVE.String() - actLiveness3 := liveness3 - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + expReadiness1 := index.NodeHealthCheckResponse_READY.String() + actReadiness1 := readiness1 + if expReadiness1 != actReadiness1 { + t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } -} -func TestCluster_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + // healthiness + healthiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, 
logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness2 := healthiness2 + if expHealthiness2 != actHealthiness2 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } - // start server1 - server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() + // liveness + liveness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness2 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness2 := liveness2 + if expLiveness2 != actLiveness2 { + t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - 
if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() + // readiness + readiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) + expReadiness2 := index.NodeHealthCheckResponse_READY.String() + actReadiness2 := readiness2 + if expReadiness2 != actReadiness2 { + t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } - // readiness check for server1 - readiness1, err := client1.ReadinessProbe() + // healthiness + healthiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := index.ReadinessProbeResponse_READY.String() - actReadiness1 := readiness1 - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness3 := healthiness3 + if expHealthiness3 != actHealthiness3 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } - // readiness check for server2 - readiness2, err := client2.ReadinessProbe() + // liveness + liveness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := index.ReadinessProbeResponse_READY.String() - actReadiness2 := readiness2 - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + expLiveness3 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness3 := liveness3 + if expLiveness3 != actLiveness3 { + t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } - // readiness check for server3 - readiness3, 
err := client3.ReadinessProbe() + // readiness + readiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := index.ReadinessProbeResponse_READY.String() + expReadiness3 := index.NodeHealthCheckResponse_READY.String() actReadiness3 := readiness3 if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) @@ -1472,99 +1559,146 @@ func TestCluster_ReadinessProbe(t *testing.T) { func TestCluster_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // 
create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server 
server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", 
err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1573,220 +1707,204 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.GetNode(nodeConfig1.NodeId) + node11, err := client1.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode11 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode11 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, } actNode11 := node11 if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node12, err := client1.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode12 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode12 := node12 - if !reflect.DeepEqual(expNode12, actNode12) { - t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) - } - - node13, err := client1.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode13 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode13 := node13 - if !reflect.DeepEqual(expNode13, actNode13) { - t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) - } - - node21, err := client2.GetNode(nodeConfig1.NodeId) + node21, err := client2.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode21 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode21 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: 
httpAddress2, + }, } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node22, err := client2.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode22 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode22 := node22 - if !reflect.DeepEqual(expNode22, actNode22) { - t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) - } - - node23, err := client2.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode23 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode23 := node23 - if !reflect.DeepEqual(expNode23, actNode23) { - t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) - } - - node31, err := client3.GetNode(nodeConfig1.NodeId) + node31, err := client3.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode31 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode31 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, } actNode31 := node31 if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } - - node32, err := client3.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode32 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode32 := node32 - if !reflect.DeepEqual(expNode32, actNode32) { - t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) - } - - node33, err := client3.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode33 := map[string]interface{}{ - "node_config": 
server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode33 := node33 - if !reflect.DeepEqual(expNode33, actNode33) { - t.Fatalf("expected content to see %v, saw %v", expNode33, actNode33) - } } func TestCluster_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) 
- defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := 
NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1794,23 +1912,40 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("%v", err) } - // get 
cluster info from all servers - cluster1, err := client1.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + // get cluster info from manager1 + cluster1, err := client1.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) + } + expCluster1 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster1 := cluster1 @@ -1818,22 +1953,39 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + cluster2, err := client2.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) 
+ } + expCluster2 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster2 := cluster2 @@ -1841,22 +1993,39 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + cluster3, err := client3.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) + } + expCluster3 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster3 
:= cluster3 diff --git a/manager/grpc_client.go b/manager/grpc_client.go index af4c46e..6935724 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -45,6 +45,16 @@ func NewGRPCContext() (context.Context, context.CancelFunc) { func NewGRPCClient(address string) (*GRPCClient, error) { ctx, cancel := NewGRPCContext() + //streamRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.Disable(), + //} + + //unaryRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), + // grpc_retry.WithCodes(codes.Unavailable), + // grpc_retry.WithMax(100), + //} + dialOpts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithDefaultCallOptions( diff --git a/manager/grpc_service.go b/manager/grpc_service.go index c763412..47b65a2 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -204,10 +204,12 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { n1, err := json.Marshal(node) if err != nil { s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) + continue } n2, err := json.Marshal(nodeSnapshot) if err != nil { s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + continue } if !cmp.Equal(n1, n2) { // node updated @@ -632,8 +634,19 @@ func (s *GRPCService) Watch(req *management.WatchRequest, server management.Mana close(chans) }() + // normalize key + key := func(key string) string { + keys := make([]string, 0) + for _, k := range strings.Split(key, "/") { + if k != "" { + keys = append(keys, k) + } + } + return strings.Join(keys, "/") + }(req.Key) + for resp := range chans { - if !strings.HasPrefix(resp.Key, req.Key) { + if !strings.HasPrefix(resp.Key, key) { continue } err := server.Send(&resp) diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 6eae3d5..325042d 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -47,7 +47,7 @@ func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { } func (f 
*RaftFSM) Start() error { - f.logger.Info("initialize metadata") + f.logger.Info("initialize cluster") f.cluster = &management.Cluster{Nodes: make(map[string]*management.Node, 0)} f.logger.Info("initialize store data") @@ -205,7 +205,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetValue(data["key"].(string), data["value"], true) + err = f.SetValue(data["key"].(string), data["value"], false) return &fsmResponse{error: err} case deleteKeyValue: var data map[string]interface{} diff --git a/manager/raft_server.go b/manager/raft_server.go index 257052f..76e3324 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -36,7 +36,6 @@ import ( ) type RaftServer struct { - //nodeId string node *management.Node dataDir string raftStorageType string @@ -51,7 +50,6 @@ type RaftServer struct { func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - //nodeId: nodeId, node: node, dataDir: dataDir, raftStorageType: raftStorageType, @@ -79,11 +77,14 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", zap.String("node_id", s.node.Id)) + s.logger.Info("create Raft config", zap.String("id", s.node.Id)) raftConfig := raft.DefaultConfig() raftConfig.LocalID = raft.ServerID(s.node.Id) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard + //if s.bootstrap { + // raftConfig.StartAsLeader = true + //} s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) diff --git a/manager/server_test.go b/manager/server_test.go index 1bf019b..b61bdae 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -42,6 +42,9 @@ func TestServer_Start(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) 
bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -90,6 +93,9 @@ func TestServer_HealthCheck(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -185,6 +191,9 @@ func TestServer_GetNode(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -266,6 +275,9 @@ func TestServer_GetCluster(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -351,6 +363,9 @@ func TestServer_SetState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -433,6 +448,9 @@ func TestServer_GetState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -515,6 +533,9 @@ func TestServer_DeleteState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" 
node := &management.Node{ @@ -618,6 +639,9 @@ func TestCluster_Start(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -655,6 +679,9 @@ func TestCluster_Start(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -692,6 +719,9 @@ func TestCluster_Start(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -740,6 +770,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -777,6 +810,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -814,6 +850,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -969,7 +1008,6 @@ func TestCluster_HealthCheck(t *testing.T) { if expReadiness3 != actReadiness3 { 
t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } - } func TestCluster_GetNode(t *testing.T) { @@ -985,6 +1023,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1022,6 +1063,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1059,6 +1103,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1185,6 +1232,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1222,6 +1272,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1259,6 +1312,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := 
&management.Node{ @@ -1451,6 +1507,9 @@ func TestCluster_SetState(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1488,6 +1547,9 @@ func TestCluster_SetState(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1525,6 +1587,9 @@ func TestCluster_SetState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1701,6 +1766,9 @@ func TestCluster_GetState(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1738,6 +1806,9 @@ func TestCluster_GetState(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1775,6 +1846,9 @@ func TestCluster_GetState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1951,6 +2025,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId1 := 
fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1988,6 +2065,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -2025,6 +2105,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go index b935dea..9a8174f 100644 --- a/protobuf/distribute/distribute.pb.go +++ b/protobuf/distribute/distribute.pb.go @@ -8,7 +8,6 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" math "math" ) @@ -24,140 +23,147 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type LivenessProbeResponse_State int32 +type NodeHealthCheckRequest_Probe int32 const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 ) -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", +var NodeHealthCheckRequest_Probe_name = map[int32]string{ + 0: "HEALTHINESS", + 1: "LIVENESS", + 2: "READINESS", } -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, +var NodeHealthCheckRequest_Probe_value = map[string]int32{ + "HEALTHINESS": 0, + "LIVENESS": 1, + "READINESS": 2, } -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckRequest_Probe) String() string { + return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) } -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{0, 0} } -type ReadinessProbeResponse_State int32 +type NodeHealthCheckResponse_State int32 const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 
3 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 ) -var ReadinessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", +var NodeHealthCheckResponse_State_name = map[int32]string{ + 0: "HEALTHY", + 1: "UNHEALTHY", + 2: "ALIVE", + 3: "DEAD", + 4: "READY", + 5: "NOT_READY", } -var ReadinessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, +var NodeHealthCheckResponse_State_value = map[string]int32{ + "HEALTHY": 0, + "UNHEALTHY": 1, + "ALIVE": 2, + "DEAD": 3, + "READY": 4, + "NOT_READY": 5, } -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckResponse_State) String() string { + return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) } -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{1, 0} } -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckRequest struct { + Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=distribute.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() 
([]byte, []int) { +func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } +func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckRequest) ProtoMessage() {} +func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{0} } -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) } -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) } -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) } -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) +func (m *NodeHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckRequest.Size(m) } -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) } -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { +func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { - return m.State + return m.Probe } - return 
LivenessProbeResponse_UNKNOWN + return NodeHealthCheckRequest_HEALTHINESS } -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.ReadinessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckResponse struct { + State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.NodeHealthCheckResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } +func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckResponse) ProtoMessage() {} +func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{1} } -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) } -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) } -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) } -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) +func (m *NodeHealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckResponse.Size(m) } -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) } -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { +func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return ReadinessProbeResponse_UNKNOWN + return NodeHealthCheckResponse_HEALTHY } type GetDocumentRequest struct { @@ -481,10 +487,10 @@ func (m *SearchResponse) GetSearchResult() *any.Any { } func init() { - proto.RegisterEnum("distribute.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("distribute.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "distribute.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "distribute.ReadinessProbeResponse") + proto.RegisterEnum("distribute.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) + proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") + 
proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") proto.RegisterType((*GetDocumentRequest)(nil), "distribute.GetDocumentRequest") proto.RegisterType((*GetDocumentResponse)(nil), "distribute.GetDocumentResponse") proto.RegisterType((*IndexDocumentRequest)(nil), "distribute.IndexDocumentRequest") @@ -500,40 +506,42 @@ func init() { } var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 528 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xdd, 0x8f, 0xd2, 0x4c, - 0x14, 0xc6, 0x29, 0xef, 0x0b, 0xca, 0xc1, 0x36, 0x64, 0x04, 0xe2, 0xd6, 0x44, 0xd7, 0xc9, 0x26, - 0x8b, 0xd1, 0x2d, 0x09, 0x5e, 0x19, 0xa3, 0x09, 0x5a, 0x62, 0x36, 0x4b, 0xba, 0x9b, 0xee, 0xfa, - 0x7d, 0xb1, 0x69, 0xe9, 0x59, 0xb6, 0xb1, 0x74, 0xb0, 0x33, 0x35, 0xee, 0xa5, 0x77, 0xfe, 0xc5, - 0x5e, 0x1b, 0xfa, 0x81, 0x1d, 0x28, 0xe0, 0x1d, 0x73, 0xce, 0x79, 0x7e, 0xf3, 0xf4, 0xe4, 0x19, - 0xe0, 0x60, 0x1e, 0x31, 0xc1, 0xdc, 0xf8, 0xaa, 0xef, 0xf9, 0x5c, 0x44, 0xbe, 0x1b, 0x0b, 0x2c, - 0xfc, 0x34, 0x92, 0x36, 0x81, 0xbf, 0x15, 0x7d, 0x6f, 0xca, 0xd8, 0x34, 0xc0, 0xfe, 0x52, 0xe8, - 0x84, 0x37, 0xe9, 0x98, 0x7e, 0x7f, 0xb5, 0x85, 0xb3, 0xb9, 0xc8, 0x9a, 0xf4, 0xa7, 0x02, 0x9d, - 0xb1, 0xff, 0x1d, 0x43, 0xe4, 0xfc, 0x2c, 0x62, 0x2e, 0xda, 0xc8, 0xe7, 0x2c, 0xe4, 0x48, 0x5e, - 0x42, 0x8d, 0x0b, 0x47, 0xe0, 0x3d, 0x65, 0x5f, 0xe9, 0x69, 0x83, 0x43, 0xa3, 0x70, 0x7f, 0xa9, - 0xc2, 0x38, 0x5f, 0x8c, 0xdb, 0xa9, 0x8a, 0x3e, 0x86, 0x5a, 0x72, 0x26, 0x4d, 0xb8, 0xf5, 0xce, - 0x3a, 0xb1, 0x4e, 0x3f, 0x58, 0xad, 0x0a, 0x69, 0x40, 0x6d, 0x38, 0x3e, 0x7e, 0x3f, 0x6a, 0x29, - 0xe4, 0x36, 0xfc, 0x6f, 0x8e, 0x86, 0x66, 0xab, 0x4a, 0x7f, 0x29, 0xd0, 0xb5, 0xd1, 0xf1, 0xfc, - 0x75, 0x13, 0xaf, 0x64, 0x13, 0xbd, 0xa2, 0x89, 0x72, 0x89, 0xec, 0xc2, 0xd8, 0xe4, 0xc2, 0x1e, - 0x0d, 0xcd, 0x4f, 0x2d, 0x85, 0xa8, 0xd0, 0xb0, 0x4e, 0x2f, 0x2e, 0xd3, 0x63, 0x95, 0x1e, 0x00, - 0x79, 0x8b, 0xc2, 0x64, 0x93, 0x78, 0x86, 0xa1, 0xb0, 0xf1, 
0x5b, 0x8c, 0x5c, 0x10, 0x0d, 0xaa, - 0xbe, 0x97, 0x58, 0x68, 0xd8, 0x55, 0xdf, 0xa3, 0x6f, 0xe0, 0xae, 0x34, 0x95, 0x99, 0x7d, 0x0a, - 0xf5, 0x2b, 0x1f, 0x03, 0x8f, 0x27, 0xa3, 0xcd, 0x41, 0xdb, 0x48, 0x37, 0x6f, 0xe4, 0x9b, 0x37, - 0x86, 0xe1, 0x8d, 0x9d, 0xcd, 0xd0, 0x0b, 0x68, 0x1f, 0x87, 0x1e, 0xfe, 0xd8, 0x71, 0x59, 0x81, - 0x5a, 0xfd, 0x07, 0xea, 0x11, 0x74, 0x56, 0xa8, 0x99, 0xb9, 0x36, 0xd4, 0x26, 0x2c, 0x0e, 0x45, - 0x42, 0xae, 0xd9, 0xe9, 0x81, 0x1e, 0x42, 0xc7, 0xc4, 0x00, 0x05, 0xee, 0xfa, 0x64, 0x03, 0xba, - 0xab, 0x83, 0x5b, 0xc1, 0x63, 0x50, 0xcf, 0xd1, 0x89, 0x26, 0xd7, 0x39, 0xf0, 0x05, 0x68, 0x3c, - 0x29, 0x5c, 0x46, 0x69, 0x65, 0xeb, 0x92, 0x54, 0x5e, 0x14, 0xd3, 0x13, 0xd0, 0x72, 0x5a, 0x76, - 0xeb, 0x73, 0x50, 0x97, 0x38, 0x1e, 0x07, 0xdb, 0x69, 0x77, 0x72, 0xda, 0x62, 0x72, 0xf0, 0xfb, - 0x3f, 0x00, 0x73, 0x19, 0x23, 0x32, 0x06, 0x55, 0x8a, 0x33, 0xe9, 0xae, 0x31, 0x46, 0x8b, 0x07, - 0xa3, 0x3f, 0xda, 0xf9, 0x02, 0x68, 0x85, 0x58, 0xa0, 0xc9, 0xb9, 0xdc, 0x88, 0xa3, 0xbb, 0xb3, - 0x4c, 0x2b, 0xe4, 0x0c, 0x9a, 0x85, 0xa8, 0x91, 0x07, 0x45, 0xd1, 0x7a, 0x52, 0xf5, 0x87, 0x1b, - 0xfb, 0x4b, 0xe2, 0x47, 0x50, 0xa5, 0x84, 0x90, 0xfd, 0xa2, 0xa6, 0x2c, 0x92, 0xf2, 0x97, 0x97, - 0xc6, 0x8b, 0x56, 0x7a, 0x0a, 0xf9, 0x02, 0x9a, 0x9c, 0x11, 0x22, 0x09, 0x4b, 0x83, 0x26, 0xaf, - 0xa1, 0x3c, 0x62, 0x09, 0x7c, 0x08, 0xf5, 0x34, 0x02, 0x64, 0xaf, 0xa8, 0x90, 0x42, 0xa6, 0xeb, - 0x65, 0xad, 0x1c, 0xf2, 0xfa, 0xe8, 0xf3, 0x93, 0xa9, 0x2f, 0xae, 0x63, 0xd7, 0x98, 0xb0, 0x59, - 0x7f, 0xc6, 0x78, 0xfc, 0xd5, 0xe9, 0xbb, 0x81, 0xc3, 0x45, 0xbf, 0xe4, 0xff, 0xd6, 0xad, 0x27, - 0xc5, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xf4, 0x4b, 0x0a, 0x8d, 0x05, 0x00, 0x00, + // 547 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xef, 0x6f, 0xd2, 0x40, + 0x18, 0xa6, 0x6c, 0xc5, 0xf1, 0x32, 0x18, 0x39, 0xd9, 0x74, 0xfd, 0xa0, 0xf3, 0x5c, 0x22, 0x46, + 0x57, 0x12, 0x8c, 0x1f, 0x8c, 0x89, 0xa6, 0xae, 
0x44, 0x88, 0x04, 0x97, 0x82, 0xc6, 0xa9, 0xc9, + 0xd2, 0x1f, 0x37, 0x68, 0x56, 0x7a, 0xd8, 0xbb, 0x26, 0xee, 0xaf, 0xf0, 0x3f, 0xf1, 0xa3, 0x7f, + 0x9f, 0x69, 0x8f, 0x62, 0xcb, 0x6a, 0xd9, 0x37, 0xde, 0xf7, 0x7d, 0x9e, 0xe7, 0x9e, 0xbb, 0xf7, + 0xa1, 0x70, 0xbc, 0x08, 0x28, 0xa7, 0x56, 0x78, 0xd9, 0x71, 0x5c, 0xc6, 0x03, 0xd7, 0x0a, 0x39, + 0x49, 0xfd, 0x54, 0xe3, 0x31, 0x82, 0x7f, 0x1d, 0xe5, 0x70, 0x4a, 0xe9, 0xd4, 0x23, 0x9d, 0x15, + 0xd1, 0xf4, 0xaf, 0x05, 0x0c, 0xff, 0x92, 0xe0, 0x60, 0x44, 0x1d, 0xd2, 0x27, 0xa6, 0xc7, 0x67, + 0xa7, 0x33, 0x62, 0x5f, 0x19, 0xe4, 0x47, 0x48, 0x18, 0x47, 0x6f, 0x40, 0x5e, 0x04, 0xd4, 0x22, + 0xf7, 0xa5, 0x23, 0xa9, 0xdd, 0xe8, 0xb6, 0xd5, 0xd4, 0x19, 0xf9, 0x14, 0xf5, 0x2c, 0xc2, 0x1b, + 0x82, 0x86, 0x5f, 0x82, 0x1c, 0xd7, 0x68, 0x0f, 0x6a, 0xfd, 0x9e, 0x36, 0x9c, 0xf4, 0x07, 0xa3, + 0xde, 0x78, 0xdc, 0x2c, 0xa1, 0x5d, 0xd8, 0x19, 0x0e, 0x3e, 0xf7, 0xe2, 0x4a, 0x42, 0x75, 0xa8, + 0x1a, 0x3d, 0x4d, 0x17, 0xc3, 0x32, 0xfe, 0x2d, 0xc1, 0xbd, 0x1b, 0xf2, 0x6c, 0x41, 0x7d, 0x46, + 0xd0, 0x5b, 0x90, 0x19, 0x37, 0x79, 0x62, 0xe9, 0x69, 0xa1, 0x25, 0xc1, 0x51, 0xc7, 0x11, 0xc1, + 0x10, 0x3c, 0x6c, 0x80, 0x1c, 0xd7, 0xa8, 0x06, 0x77, 0x84, 0xa7, 0xf3, 0x66, 0x29, 0x72, 0xf0, + 0x69, 0x94, 0x94, 0x12, 0xaa, 0x82, 0xac, 0x45, 0xfe, 0x9a, 0x65, 0xb4, 0x03, 0xdb, 0x7a, 0x4f, + 0xd3, 0x9b, 0x5b, 0x51, 0x33, 0x72, 0x79, 0xde, 0xdc, 0x8e, 0xe0, 0xa3, 0x8f, 0x93, 0x0b, 0x51, + 0xca, 0xf8, 0x18, 0xd0, 0x7b, 0xc2, 0x75, 0x6a, 0x87, 0x73, 0xe2, 0xf3, 0xe4, 0xf5, 0x1a, 0x50, + 0x76, 0x9d, 0xd8, 0x67, 0xd5, 0x28, 0xbb, 0x0e, 0x3e, 0x85, 0xbb, 0x19, 0xd4, 0xf2, 0x46, 0xcf, + 0xa1, 0x72, 0xe9, 0x12, 0xcf, 0x61, 0x31, 0xb4, 0xd6, 0x6d, 0xa9, 0x62, 0x57, 0x6a, 0xb2, 0x2b, + 0x55, 0xf3, 0xaf, 0x8d, 0x25, 0x06, 0x4f, 0xa0, 0x35, 0xf0, 0x1d, 0xf2, 0x73, 0xc3, 0x61, 0x29, + 0xd5, 0xf2, 0x2d, 0x54, 0x4f, 0x60, 0x7f, 0x4d, 0x75, 0x69, 0xae, 0x05, 0xb2, 0x4d, 0x43, 0x9f, + 0xc7, 0xca, 0xb2, 0x21, 0x0a, 0xfc, 0x04, 0xf6, 0x75, 0xe2, 0x11, 0x4e, 0x36, 0x5d, 
0x59, 0x85, + 0x83, 0x75, 0x60, 0xa1, 0xf0, 0x10, 0xea, 0x63, 0x62, 0x06, 0xf6, 0x2c, 0x11, 0x7c, 0x0d, 0x0d, + 0x16, 0x37, 0x2e, 0x02, 0xd1, 0x29, 0x7c, 0xa4, 0x3a, 0x4b, 0x93, 0xf1, 0x07, 0x68, 0x24, 0x6a, + 0xcb, 0x53, 0x5f, 0x41, 0x7d, 0x25, 0xc7, 0x42, 0xaf, 0x58, 0x6d, 0x37, 0x51, 0x8b, 0x90, 0xdd, + 0x3f, 0x5b, 0x00, 0xfa, 0x2a, 0x6b, 0xe8, 0x3b, 0xec, 0xad, 0xc5, 0x0d, 0xe1, 0xcd, 0x7f, 0x0f, + 0xe5, 0xf1, 0x2d, 0xf2, 0x8a, 0x4b, 0xe8, 0x0c, 0x6a, 0xa9, 0xa8, 0xa0, 0x07, 0x69, 0xd6, 0xcd, + 0xa4, 0x29, 0x0f, 0xff, 0x3b, 0x5f, 0x29, 0x7e, 0x81, 0x7a, 0x66, 0xc3, 0xe8, 0x28, 0xcd, 0xc9, + 0x8b, 0x94, 0xf2, 0xa8, 0x00, 0x91, 0xe8, 0xb6, 0x25, 0xf4, 0x0d, 0x1a, 0xd9, 0x1d, 0xa3, 0x0c, + 0x31, 0x37, 0x28, 0x0a, 0x2e, 0x82, 0xa4, 0xc4, 0x35, 0xa8, 0x88, 0x15, 0xa2, 0xc3, 0x34, 0x23, + 0x13, 0x12, 0x45, 0xc9, 0x1b, 0x25, 0x22, 0xef, 0x4e, 0xbe, 0x3e, 0x9b, 0xba, 0x7c, 0x16, 0x5a, + 0xaa, 0x4d, 0xe7, 0x9d, 0x39, 0x65, 0xe1, 0x95, 0xd9, 0xb1, 0x3c, 0x93, 0xf1, 0x4e, 0xce, 0x67, + 0xd4, 0xaa, 0xc4, 0xcd, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x35, 0xcf, 0xa0, 0x84, 0x64, + 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -548,8 +556,7 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type DistributeClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) + NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) @@ -564,18 +571,9 @@ func NewDistributeClient(cc *grpc.ClientConn) DistributeClient { return &distributeClient{cc} } -func (c *distributeClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { - out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/ReadinessProbe", in, out, opts...) +func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { + out := new(NodeHealthCheckResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/NodeHealthCheck", in, out, opts...) if err != nil { return nil, err } @@ -670,8 +668,7 @@ func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts . // DistributeServer is the server API for Distribute service. 
type DistributeServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) + NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) IndexDocument(Distribute_IndexDocumentServer) error DeleteDocument(Distribute_DeleteDocumentServer) error @@ -682,38 +679,20 @@ func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { s.RegisterService(&_Distribute_serviceDesc, srv) } -func _Distribute_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).LivenessProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/LivenessProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).LivenessProbe(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) +func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeHealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(DistributeServer).ReadinessProbe(ctx, in) + return srv.(DistributeServer).NodeHealthCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/distribute.Distribute/ReadinessProbe", + FullMethod: 
"/distribute.Distribute/NodeHealthCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).ReadinessProbe(ctx, req.(*empty.Empty)) + return srv.(DistributeServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) } return interceptor(ctx, in, info, handler) } @@ -811,12 +790,8 @@ var _Distribute_serviceDesc = grpc.ServiceDesc{ HandlerType: (*DistributeServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "LivenessProbe", - Handler: _Distribute_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Distribute_ReadinessProbe_Handler, + MethodName: "NodeHealthCheck", + Handler: _Distribute_NodeHealthCheck_Handler, }, { MethodName: "GetDocument", diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto index c2c6f20..7f08a56 100644 --- a/protobuf/distribute/distribute.proto +++ b/protobuf/distribute/distribute.proto @@ -15,15 +15,13 @@ syntax = "proto3"; import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; package distribute; option go_package = "github.com/mosuka/blast/protobuf/distribute"; service Distribute { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} @@ -31,22 +29,23 @@ service Distribute { rpc Search (SearchRequest) returns (SearchResponse) {} } -// use for health check -message LivenessProbeResponse { - enum State { - UNKNOWN = 0; - ALIVE = 1; - DEAD = 2; +message NodeHealthCheckRequest { + enum Probe { + HEALTHINESS = 0; + LIVENESS = 1; + READINESS = 2; } - State state = 1; + Probe probe = 1; } -// use for health check -message ReadinessProbeResponse { +message 
NodeHealthCheckResponse { enum State { - UNKNOWN = 0; - READY = 1; - NOT_READY = 2; + HEALTHY = 0; + UNHEALTHY = 1; + ALIVE = 2; + DEAD = 3; + READY = 4; + NOT_READY = 5; } State state = 1; } diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index e0e0dcf..8b42e8c 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -24,352 +24,568 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type LivenessProbeResponse_State int32 +type NodeHealthCheckRequest_Probe int32 const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 ) -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", +var NodeHealthCheckRequest_Probe_name = map[int32]string{ + 0: "HEALTHINESS", + 1: "LIVENESS", + 2: "READINESS", } -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, +var NodeHealthCheckRequest_Probe_value = map[string]int32{ + "HEALTHINESS": 0, + "LIVENESS": 1, + "READINESS": 2, } -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckRequest_Probe) String() string { + return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) } -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{0, 0} } -type ReadinessProbeResponse_State int32 +type NodeHealthCheckResponse_State int32 + +const ( + 
NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 +) + +var NodeHealthCheckResponse_State_name = map[int32]string{ + 0: "HEALTHY", + 1: "UNHEALTHY", + 2: "ALIVE", + 3: "DEAD", + 4: "READY", + 5: "NOT_READY", +} + +var NodeHealthCheckResponse_State_value = map[string]int32{ + "HEALTHY": 0, + "UNHEALTHY": 1, + "ALIVE": 2, + "DEAD": 3, + "READY": 4, + "NOT_READY": 5, +} + +func (x NodeHealthCheckResponse_State) String() string { + return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) +} + +func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{1, 0} +} + +type Node_State int32 const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 + Node_UNKNOWN Node_State = 0 + Node_FOLLOWER Node_State = 1 + Node_CANDIDATE Node_State = 2 + Node_LEADER Node_State = 3 + Node_SHUTDOWN Node_State = 4 ) -var ReadinessProbeResponse_State_name = map[int32]string{ +var Node_State_name = map[int32]string{ 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", + 1: "FOLLOWER", + 2: "CANDIDATE", + 3: "LEADER", + 4: "SHUTDOWN", } -var ReadinessProbeResponse_State_value = map[string]int32{ +var Node_State_value = map[string]int32{ "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, + "FOLLOWER": 1, + "CANDIDATE": 2, + "LEADER": 3, + "SHUTDOWN": 4, } -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +func (x Node_State) String() string { + return 
proto.EnumName(Node_State_name, int32(x)) } -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1, 0} +func (Node_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{3, 0} } -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ClusterWatchResponse_Event int32 + +const ( + ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 + ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 + ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 + ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 +) + +var ClusterWatchResponse_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "JOIN", + 2: "LEAVE", + 3: "UPDATE", } -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { +var ClusterWatchResponse_Event_value = map[string]int32{ + "UNKNOWN": 0, + "JOIN": 1, + "LEAVE": 2, + "UPDATE": 3, +} + +func (x ClusterWatchResponse_Event) String() string { + return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) +} + +func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{9, 0} +} + +type NodeHealthCheckRequest struct { + Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeHealthCheckRequest) 
Reset() { *m = NodeHealthCheckRequest{} } +func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckRequest) ProtoMessage() {} +func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{0} } -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) } -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) } -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) } -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) +func (m *NodeHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckRequest.Size(m) } -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) } -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { +func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { - return m.State + return m.Probe } - return LivenessProbeResponse_UNKNOWN + return 
NodeHealthCheckRequest_HEALTHINESS } -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.ReadinessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckResponse struct { + State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.NodeHealthCheckResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } +func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckResponse) ProtoMessage() {} +func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{1} } -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) } -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) } -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) 
+func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) } -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) +func (m *NodeHealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckResponse.Size(m) } -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) } -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { +func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return ReadinessProbeResponse_UNKNOWN + return NodeHealthCheckResponse_HEALTHY } -// use for raft cluster status -type GetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type Metadata struct { + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } -func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*GetNodeRequest) ProtoMessage() {} -func (*GetNodeRequest) Descriptor() ([]byte, []int) { +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{2} } -func (m 
*GetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeRequest.Unmarshal(m, b) +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) } -func (m *GetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeRequest.Marshal(b, m, deterministic) +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } -func (m *GetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeRequest.Merge(m, src) +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) } -func (m *GetNodeRequest) XXX_Size() int { - return xxx_messageInfo_GetNodeRequest.Size(m) +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) } -func (m *GetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeRequest.DiscardUnknown(m) +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_Metadata proto.InternalMessageInfo -func (m *GetNodeRequest) GetId() string { +func (m *Metadata) GetGrpcAddress() string { if m != nil { - return m.Id + return m.GrpcAddress } return "" } -// use for raft cluster status -type GetNodeResponse struct { - NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Metadata) GetHttpAddress() string { + if m != nil { + return m.HttpAddress + } + return "" +} + +type Node struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + BindAddress string `protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" 
json:"bind_address,omitempty"` + State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=index.Node_State" json:"state,omitempty"` + Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } -func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } -func (*GetNodeResponse) ProtoMessage() {} -func (*GetNodeResponse) Descriptor() ([]byte, []int) { +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{3} } -func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeResponse.Unmarshal(m, b) +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) } -func (m *GetNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeResponse.Marshal(b, m, deterministic) +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) } -func (m *GetNodeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeResponse.Merge(m, src) +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) } -func (m *GetNodeResponse) XXX_Size() int { - return xxx_messageInfo_GetNodeResponse.Size(m) +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) } -func (m *GetNodeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeResponse.DiscardUnknown(m) +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeResponse proto.InternalMessageInfo +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m 
*GetNodeResponse) GetNodeConfig() *any.Any { +func (m *Node) GetId() string { if m != nil { - return m.NodeConfig + return m.Id } - return nil + return "" } -func (m *GetNodeResponse) GetState() string { +func (m *Node) GetBindAddress() string { if m != nil { - return m.State + return m.BindAddress } return "" } -// use for raft cluster status -type SetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - NodeConfig *any.Any `protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` +func (m *Node) GetState() Node_State { + if m != nil { + return m.State + } + return Node_UNKNOWN +} + +func (m *Node) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Cluster struct { + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{4} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetNodes() map[string]*Node { + if m != nil { + return m.Nodes + } + return nil +} + 
+type NodeInfoResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } -func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*SetNodeRequest) ProtoMessage() {} -func (*SetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{4} +func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } +func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeInfoResponse) ProtoMessage() {} +func (*NodeInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{5} } -func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNodeRequest.Unmarshal(m, b) +func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) } -func (m *SetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNodeRequest.Marshal(b, m, deterministic) +func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) } -func (m *SetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNodeRequest.Merge(m, src) +func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeInfoResponse.Merge(m, src) } -func (m *SetNodeRequest) XXX_Size() int { - return xxx_messageInfo_SetNodeRequest.Size(m) +func (m *NodeInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeInfoResponse.Size(m) } -func (m *SetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNodeRequest.DiscardUnknown(m) +func (m *NodeInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_SetNodeRequest 
proto.InternalMessageInfo +var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo -func (m *SetNodeRequest) GetId() string { +func (m *NodeInfoResponse) GetNode() *Node { if m != nil { - return m.Id + return m.Node } - return "" + return nil } -func (m *SetNodeRequest) GetNodeConfig() *any.Any { +type ClusterJoinRequest struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } +func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterJoinRequest) ProtoMessage() {} +func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{6} +} + +func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) +} +func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) +} +func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterJoinRequest.Merge(m, src) +} +func (m *ClusterJoinRequest) XXX_Size() int { + return xxx_messageInfo_ClusterJoinRequest.Size(m) +} +func (m *ClusterJoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo + +func (m *ClusterJoinRequest) GetNode() *Node { if m != nil { - return m.NodeConfig + return m.Node } return nil } -// use for raft cluster status -type DeleteNodeRequest struct { +type ClusterLeaveRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } 
-func (m *DeleteNodeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteNodeRequest) ProtoMessage() {} -func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{5} +func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } +func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterLeaveRequest) ProtoMessage() {} +func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{7} } -func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteNodeRequest.Unmarshal(m, b) +func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) } -func (m *DeleteNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteNodeRequest.Marshal(b, m, deterministic) +func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) } -func (m *DeleteNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNodeRequest.Merge(m, src) +func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) } -func (m *DeleteNodeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteNodeRequest.Size(m) +func (m *ClusterLeaveRequest) XXX_Size() int { + return xxx_messageInfo_ClusterLeaveRequest.Size(m) } -func (m *DeleteNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNodeRequest.DiscardUnknown(m) +func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo -func (m *DeleteNodeRequest) GetId() string { +func (m *ClusterLeaveRequest) GetId() string { if 
m != nil { return m.Id } return "" } -// use for raft cluster status -type GetClusterResponse struct { - Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` +type ClusterInfoResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } -func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } -func (*GetClusterResponse) ProtoMessage() {} -func (*GetClusterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{6} +func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } +func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterInfoResponse) ProtoMessage() {} +func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{8} } -func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterResponse.Unmarshal(m, b) +func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) } -func (m *GetClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterResponse.Marshal(b, m, deterministic) +func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) } -func (m *GetClusterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetClusterResponse.Merge(m, src) +func (m *ClusterInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterInfoResponse.Merge(m, src) } -func (m *GetClusterResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterResponse.Size(m) +func (m *ClusterInfoResponse) XXX_Size() int 
{ + return xxx_messageInfo_ClusterInfoResponse.Size(m) } -func (m *GetClusterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetClusterResponse.DiscardUnknown(m) +func (m *ClusterInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetClusterResponse proto.InternalMessageInfo +var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo -func (m *GetClusterResponse) GetCluster() *any.Any { +func (m *ClusterInfoResponse) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type ClusterWatchResponse struct { + Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.ClusterWatchResponse_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } +func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterWatchResponse) ProtoMessage() {} +func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{9} +} + +func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) +} +func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) +} +func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWatchResponse.Merge(m, src) +} +func (m *ClusterWatchResponse) XXX_Size() int { + return xxx_messageInfo_ClusterWatchResponse.Size(m) +} +func (m *ClusterWatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) +} + 
+var xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo + +func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { + if m != nil { + return m.Event + } + return ClusterWatchResponse_UNKNOWN +} + +func (m *ClusterWatchResponse) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ClusterWatchResponse) GetCluster() *Cluster { if m != nil { return m.Cluster } @@ -387,7 +603,7 @@ func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } func (*GetDocumentRequest) ProtoMessage() {} func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{7} + return fileDescriptor_7b2daf652facb3ae, []int{10} } func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -426,7 +642,7 @@ func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } func (*GetDocumentResponse) ProtoMessage() {} func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{8} + return fileDescriptor_7b2daf652facb3ae, []int{11} } func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -466,7 +682,7 @@ func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } func (*IndexDocumentRequest) ProtoMessage() {} func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9} + return fileDescriptor_7b2daf652facb3ae, []int{12} } func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -512,7 +728,7 @@ func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } func (*IndexDocumentResponse) ProtoMessage() {} func 
(*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{10} + return fileDescriptor_7b2daf652facb3ae, []int{13} } func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -551,7 +767,7 @@ func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentRequest) ProtoMessage() {} func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} + return fileDescriptor_7b2daf652facb3ae, []int{14} } func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -590,7 +806,7 @@ func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentResponse) ProtoMessage() {} func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} + return fileDescriptor_7b2daf652facb3ae, []int{15} } func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -629,7 +845,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} + return fileDescriptor_7b2daf652facb3ae, []int{16} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -668,7 +884,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{14} + return fileDescriptor_7b2daf652facb3ae, []int{17} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -707,7 +923,7 @@ func (m 
*GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} + return fileDescriptor_7b2daf652facb3ae, []int{18} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -746,7 +962,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -787,7 +1003,7 @@ func (m *Document) Reset() { *m = Document{} } func (m *Document) String() string { return proto.CompactTextString(m) } func (*Document) ProtoMessage() {} func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *Document) XXX_Unmarshal(b []byte) error { @@ -823,15 +1039,21 @@ func (m *Document) GetFields() *any.Any { } func init() { - proto.RegisterEnum("index.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("index.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "index.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "index.ReadinessProbeResponse") - proto.RegisterType((*GetNodeRequest)(nil), "index.GetNodeRequest") - proto.RegisterType((*GetNodeResponse)(nil), "index.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "index.SetNodeRequest") - 
proto.RegisterType((*DeleteNodeRequest)(nil), "index.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "index.GetClusterResponse") + proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) + proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) + proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) + proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") + proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") + proto.RegisterType((*Metadata)(nil), "index.Metadata") + proto.RegisterType((*Node)(nil), "index.Node") + proto.RegisterType((*Cluster)(nil), "index.Cluster") + proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") + proto.RegisterType((*NodeInfoResponse)(nil), "index.NodeInfoResponse") + proto.RegisterType((*ClusterJoinRequest)(nil), "index.ClusterJoinRequest") + proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") + proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") + proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") @@ -848,55 +1070,74 @@ func init() { func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 755 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x6f, 0xd2, 0x50, - 0x14, 0x2d, 0x44, 0xc6, 
0x76, 0x19, 0x88, 0x4f, 0x20, 0x5b, 0xb7, 0x25, 0xcb, 0x9b, 0xd1, 0x99, - 0x68, 0x31, 0x53, 0x33, 0x7f, 0x7d, 0x10, 0x07, 0xe2, 0xb2, 0x85, 0xb9, 0x32, 0xb7, 0x68, 0x62, - 0x48, 0xa1, 0x6f, 0xd0, 0x08, 0x7d, 0xc8, 0x7b, 0x35, 0x2e, 0xf1, 0x9b, 0x7f, 0xa3, 0xff, 0x8f, - 0x69, 0x5f, 0x5b, 0xda, 0xae, 0x2d, 0x4b, 0xf6, 0x85, 0xe4, 0xdd, 0x77, 0xee, 0x39, 0xf7, 0x5d, - 0xee, 0x3d, 0x29, 0xc8, 0xd3, 0x19, 0xe5, 0xb4, 0x6f, 0x5d, 0xd6, 0x0d, 0x53, 0x27, 0xbf, 0xc5, - 0xaf, 0xe2, 0x04, 0x51, 0xce, 0x39, 0xc8, 0xeb, 0x43, 0x4a, 0x87, 0x63, 0x52, 0xf7, 0x91, 0x9a, - 0x79, 0x25, 0x10, 0xf2, 0x46, 0xf4, 0x8a, 0x4c, 0xa6, 0xdc, 0xbd, 0xc4, 0x7f, 0xa0, 0x7a, 0x6c, - 0xfc, 0x22, 0x26, 0x61, 0xec, 0xf3, 0x8c, 0xf6, 0x89, 0x4a, 0xd8, 0x94, 0x9a, 0x8c, 0xa0, 0x57, - 0x90, 0x63, 0x5c, 0xe3, 0x64, 0x2d, 0xb3, 0x9d, 0xd9, 0x2d, 0xed, 0x61, 0x45, 0x88, 0xc6, 0x82, - 0x95, 0xae, 0x8d, 0x54, 0x45, 0x02, 0x7e, 0x0c, 0x39, 0xe7, 0x8c, 0x0a, 0x90, 0xff, 0xd2, 0x39, - 0xea, 0x9c, 0x5c, 0x74, 0xca, 0x12, 0x5a, 0x81, 0x5c, 0xe3, 0xf8, 0xf0, 0xbc, 0x55, 0xce, 0xa0, - 0x65, 0xb8, 0xd3, 0x6c, 0x35, 0x9a, 0xe5, 0x2c, 0xfe, 0x9b, 0x81, 0x9a, 0x4a, 0x34, 0xdd, 0xb8, - 0xae, 0xff, 0x3a, 0xac, 0xbf, 0xe3, 0xea, 0xc7, 0xa3, 0xc3, 0x05, 0x28, 0x49, 0x05, 0xa8, 0xad, - 0x46, 0xf3, 0x6b, 0x39, 0x83, 0x8a, 0xb0, 0xd2, 0x39, 0x39, 0xeb, 0x89, 0x63, 0x16, 0x6f, 0x43, - 0xa9, 0x4d, 0x78, 0x87, 0xea, 0x44, 0x25, 0x3f, 0x2d, 0xc2, 0x38, 0x2a, 0x41, 0xd6, 0xd0, 0x1d, - 0xe5, 0x15, 0x35, 0x6b, 0xe8, 0xf8, 0x3b, 0xdc, 0xf5, 0x11, 0x6e, 0x7d, 0x2f, 0x00, 0x4c, 0xaa, - 0x93, 0x03, 0x6a, 0x5e, 0x1a, 0x43, 0x07, 0x5a, 0xd8, 0xab, 0x28, 0xa2, 0xd5, 0x8a, 0xd7, 0x6a, - 0xa5, 0x61, 0x5e, 0xa9, 0x01, 0x1c, 0xaa, 0x78, 0xaf, 0xca, 0x3a, 0xdc, 0x6e, 0xc1, 0xe7, 0x50, - 0xea, 0xa6, 0x16, 0x10, 0x51, 0xcb, 0xde, 0x4c, 0x0d, 0xef, 0xc0, 0xbd, 0x26, 0x19, 0x13, 0x4e, - 0xd2, 0xde, 0xd6, 0x04, 0xd4, 0x26, 0xfc, 0x60, 0x6c, 0x31, 0x4e, 0x66, 0xfe, 0xf3, 0x14, 0xc8, - 0x0f, 0x44, 0x28, 0xf5, 0x6d, 0x1e, 0x08, 0x3f, 0x70, 0x58, 
0x9a, 0x74, 0x60, 0x4d, 0x88, 0xc9, - 0x93, 0xb4, 0x0e, 0xe0, 0x7e, 0x08, 0xe5, 0x8a, 0x3d, 0x81, 0xa5, 0x4b, 0x83, 0x8c, 0x75, 0x96, - 0xaa, 0xe5, 0x62, 0xf0, 0x19, 0x54, 0x0e, 0xed, 0x59, 0x58, 0x20, 0x16, 0x60, 0xcd, 0xde, 0x80, - 0xf5, 0x29, 0x54, 0x23, 0xac, 0x6e, 0x71, 0x15, 0xc8, 0x0d, 0xa8, 0x65, 0x72, 0x87, 0x39, 0xa7, - 0x8a, 0x03, 0x7e, 0x04, 0x55, 0xd1, 0xda, 0x45, 0x4f, 0x56, 0xa0, 0x16, 0x05, 0xa6, 0x12, 0x1f, - 0x43, 0xb1, 0x4b, 0xb4, 0xd9, 0x60, 0xe4, 0x11, 0xbe, 0x85, 0x12, 0x73, 0x02, 0xbd, 0x99, 0x88, - 0xa4, 0x36, 0xa9, 0xc8, 0x82, 0xc9, 0xf8, 0xc8, 0x9e, 0x2c, 0x11, 0xf0, 0xf7, 0xaa, 0xe8, 0xd3, - 0x31, 0x6b, 0x9c, 0xce, 0xb6, 0xea, 0xb1, 0xd9, 0x48, 0x7c, 0x0a, 0xb5, 0x36, 0xe1, 0x4e, 0x97, - 0xc4, 0x80, 0xf9, 0xa4, 0xfb, 0xb0, 0xea, 0xac, 0x67, 0x6f, 0xb0, 0x78, 0x1d, 0x0a, 0xc6, 0x9c, - 0x00, 0x77, 0xa0, 0xea, 0x51, 0xda, 0x2b, 0xcb, 0x7c, 0xc6, 0x97, 0x20, 0x70, 0x3d, 0x7b, 0x43, - 0xd2, 0xe7, 0x02, 0x0c, 0x3f, 0x1d, 0x7f, 0x82, 0x65, 0xaf, 0xcf, 0xb7, 0x9b, 0x87, 0xbd, 0x7f, - 0x79, 0xc8, 0x39, 0x75, 0xa1, 0x36, 0x14, 0x43, 0xae, 0x87, 0x6a, 0xd7, 0x12, 0x5b, 0xb6, 0xa3, - 0xca, 0x9b, 0x69, 0x1e, 0x89, 0x25, 0x74, 0x08, 0xa5, 0xb0, 0x7d, 0x25, 0x32, 0x6d, 0xa5, 0xba, - 0x1d, 0x96, 0xd0, 0x1b, 0xc8, 0xbb, 0x86, 0x84, 0xaa, 0x2e, 0x36, 0x6c, 0x61, 0x72, 0x2d, 0x1a, - 0x0e, 0xe6, 0x76, 0x23, 0xb9, 0xdd, 0x68, 0x6e, 0x6c, 0x59, 0x58, 0x42, 0xef, 0x01, 0xe6, 0x8e, - 0x82, 0xd6, 0xdc, 0xf4, 0x6b, 0x26, 0x93, 0xc2, 0xd0, 0x00, 0x98, 0xdb, 0x4d, 0x62, 0x03, 0xd6, - 0xe7, 0xd5, 0x47, 0x9c, 0x09, 0x4b, 0xa8, 0x05, 0xab, 0x17, 0x1a, 0x1f, 0x8c, 0x6e, 0x43, 0xf2, - 0x2c, 0x83, 0x3e, 0x42, 0x21, 0x60, 0x46, 0x28, 0x80, 0x8e, 0xec, 0xb4, 0x2c, 0xc7, 0x5d, 0xf9, - 0xe5, 0x74, 0xa0, 0x18, 0x72, 0x0e, 0xb4, 0xe1, 0xc2, 0xe3, 0x5c, 0xca, 0x1f, 0x92, 0x58, 0xb3, - 0xc1, 0xd2, 0x6e, 0x06, 0x9d, 0x42, 0x29, 0xec, 0x18, 0x68, 0x33, 0xd4, 0xe7, 0x28, 0xe3, 0x56, - 0xc2, 0x6d, 0x80, 0x72, 0x1f, 0x96, 0x84, 0x0d, 0xa0, 0x8a, 0xff, 0x8f, 0x07, 0x6c, 0x42, 0xae, - 
0x46, 0xa2, 0xc1, 0x91, 0x0d, 0xaf, 0xfc, 0xc2, 0x91, 0x8d, 0x77, 0x08, 0x2c, 0xd9, 0x6b, 0x14, - 0x5a, 0xf5, 0x85, 0x6b, 0x14, 0x6b, 0x0c, 0x58, 0x42, 0xef, 0x60, 0xb9, 0x6b, 0x6a, 0x53, 0x36, - 0xa2, 0x3c, 0x91, 0x23, 0x71, 0xfe, 0x3e, 0xec, 0x7e, 0x7b, 0x38, 0x34, 0xf8, 0xc8, 0xea, 0x2b, - 0x03, 0x3a, 0xa9, 0x4f, 0x28, 0xb3, 0x7e, 0x68, 0xf5, 0xfe, 0x58, 0x63, 0xbc, 0x1e, 0xfe, 0xca, - 0xea, 0x2f, 0x39, 0xe7, 0xe7, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x23, 0xe9, 0x66, 0x7e, - 0x09, 0x00, 0x00, + // 1067 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x5d, 0x53, 0xdb, 0x46, + 0x14, 0xb5, 0x6c, 0x0b, 0x9c, 0x6b, 0x6c, 0xdc, 0x0d, 0x90, 0x44, 0x09, 0x6d, 0xd8, 0xa6, 0x8d, + 0x67, 0xda, 0xda, 0x1d, 0x32, 0x4c, 0x9a, 0xb4, 0x9d, 0x8e, 0x63, 0x29, 0xe0, 0xe0, 0x0a, 0x22, + 0x43, 0x98, 0xf4, 0x85, 0x91, 0xad, 0x05, 0x6b, 0x30, 0x92, 0x6b, 0xad, 0x99, 0xf2, 0xd8, 0xd7, + 0xfe, 0x92, 0xf6, 0xe7, 0xf4, 0xbd, 0x3f, 0xa6, 0xb3, 0x1f, 0x12, 0x92, 0xb0, 0x44, 0x67, 0xfa, + 0xc2, 0xb0, 0x77, 0xcf, 0x3d, 0x7b, 0xee, 0xdd, 0xbb, 0x47, 0x06, 0x6d, 0x3a, 0xf3, 0xa9, 0x3f, + 0x9c, 0x9f, 0xb5, 0x5d, 0xcf, 0x21, 0xbf, 0x89, 0xbf, 0x2d, 0x1e, 0x44, 0x2a, 0x5f, 0x68, 0x8f, + 0xce, 0x7d, 0xff, 0x7c, 0x42, 0xda, 0x11, 0xd2, 0xf6, 0xae, 0x05, 0x42, 0x7b, 0x9c, 0xde, 0x22, + 0x97, 0x53, 0x2a, 0x37, 0xf1, 0x1f, 0x0a, 0x6c, 0x98, 0xbe, 0x43, 0xf6, 0x88, 0x3d, 0xa1, 0xe3, + 0xee, 0x98, 0x8c, 0x2e, 0x2c, 0xf2, 0xeb, 0x9c, 0x04, 0x14, 0xbd, 0x02, 0x75, 0x3a, 0xf3, 0x87, + 0xe4, 0xa1, 0xf2, 0x54, 0x69, 0xd6, 0xb7, 0x3f, 0x6f, 0x89, 0x63, 0x17, 0xa3, 0x5b, 0x87, 0x0c, + 0x6a, 0x89, 0x0c, 0xbc, 0x03, 0x2a, 0x5f, 0xa3, 0x55, 0xa8, 0xee, 0x19, 0x9d, 0xfe, 0xd1, 0x5e, + 0xcf, 0x34, 0x06, 0x83, 0x46, 0x01, 0xad, 0x40, 0xa5, 0xdf, 0xfb, 0x60, 0xf0, 0x95, 0x82, 0x6a, + 0x70, 0xcf, 0x32, 0x3a, 0xba, 0xd8, 0x2c, 0xe2, 0xbf, 0x14, 0x78, 0x70, 0x8b, 0x3e, 0x98, 0xfa, + 0x5e, 0x40, 0xd0, 0x6b, 0x50, 0x03, 0x6a, 0xd3, 0x50, 0xcd, 0xb3, 
0x2c, 0x35, 0x02, 0xde, 0x1a, + 0x30, 0xac, 0x25, 0x52, 0xb0, 0x05, 0x2a, 0x5f, 0xa3, 0x2a, 0x2c, 0x0b, 0x39, 0x1f, 0x1b, 0x05, + 0x76, 0xf8, 0xb1, 0x19, 0x2e, 0x15, 0x74, 0x0f, 0xd4, 0x0e, 0x93, 0xd6, 0x28, 0xa2, 0x0a, 0x94, + 0x75, 0xa3, 0xa3, 0x37, 0x4a, 0x2c, 0xc8, 0x04, 0x7e, 0x6c, 0x94, 0x19, 0xdc, 0x3c, 0x38, 0x3a, + 0x15, 0x4b, 0x15, 0x1f, 0x42, 0xe5, 0x67, 0x42, 0x6d, 0xc7, 0xa6, 0x36, 0xda, 0x82, 0x95, 0xf3, + 0xd9, 0x74, 0x74, 0x6a, 0x3b, 0xce, 0x8c, 0x04, 0x01, 0x97, 0x78, 0xcf, 0xaa, 0xb2, 0x58, 0x47, + 0x84, 0x18, 0x64, 0x4c, 0xe9, 0x34, 0x82, 0x14, 0x05, 0x84, 0xc5, 0x24, 0x04, 0xff, 0xa3, 0x40, + 0x99, 0x95, 0x83, 0xea, 0x50, 0x74, 0x1d, 0x49, 0x52, 0x74, 0x1d, 0x96, 0x3b, 0x74, 0x3d, 0x27, + 0x9d, 0xcb, 0x62, 0x21, 0xfd, 0xf3, 0xb0, 0x3b, 0x25, 0xde, 0x9d, 0x4f, 0x62, 0xdd, 0x49, 0xb4, + 0x02, 0x7d, 0x05, 0x95, 0x4b, 0x29, 0xfb, 0x61, 0xf9, 0xa9, 0xd2, 0xac, 0x6e, 0xaf, 0x4a, 0x6c, + 0x58, 0x8d, 0x15, 0x01, 0xf0, 0x7e, 0xac, 0x6f, 0xc7, 0xe6, 0xbe, 0x79, 0x70, 0x62, 0x8a, 0x2b, + 0x7c, 0x7b, 0xd0, 0xef, 0x1f, 0x9c, 0x18, 0x96, 0xb8, 0xc2, 0x6e, 0xc7, 0xd4, 0x7b, 0x7a, 0xe7, + 0x88, 0xb5, 0x0e, 0x60, 0xa9, 0x6f, 0x74, 0x74, 0xc3, 0x6a, 0x94, 0x18, 0x70, 0xb0, 0x77, 0x7c, + 0xa4, 0xb3, 0xb4, 0x32, 0xfe, 0x5d, 0x81, 0xe5, 0xee, 0x64, 0x1e, 0x50, 0x32, 0x43, 0x6d, 0x50, + 0x3d, 0xdf, 0x21, 0xac, 0x53, 0xa5, 0x66, 0x75, 0xfb, 0x91, 0x94, 0x20, 0xb7, 0xb9, 0xec, 0xc0, + 0xf0, 0xe8, 0xec, 0xda, 0x12, 0x38, 0xcd, 0x00, 0xb8, 0x09, 0xa2, 0x06, 0x94, 0x2e, 0xc8, 0xb5, + 0xec, 0x10, 0xfb, 0x17, 0x6d, 0x81, 0x7a, 0x65, 0x4f, 0xe6, 0x84, 0xf7, 0xa6, 0xba, 0x5d, 0x8d, + 0xd5, 0x6f, 0x89, 0x9d, 0xd7, 0xc5, 0xef, 0x14, 0xfc, 0x02, 0x1a, 0x2c, 0xd4, 0xf3, 0xce, 0xfc, + 0x68, 0xb0, 0x3e, 0x83, 0x32, 0x3b, 0x83, 0xb3, 0xa5, 0x32, 0xf9, 0x06, 0xde, 0x01, 0x24, 0x85, + 0xbd, 0xf3, 0x5d, 0x2f, 0x7c, 0x1d, 0x77, 0xa6, 0x7d, 0x01, 0xf7, 0x65, 0x5a, 0x9f, 0xd8, 0x57, + 0x24, 0xcc, 0x4b, 0x5d, 0x2e, 0xfe, 0x29, 0x82, 0x25, 0x54, 0x35, 0x61, 0x79, 0x24, 0xc2, 0xf2, + 0x84, 
0x7a, 0xb2, 0x47, 0x56, 0xb8, 0x8d, 0xff, 0x56, 0x60, 0x4d, 0x06, 0x4f, 0x6c, 0x3a, 0x1a, + 0x47, 0x14, 0x2f, 0x41, 0x25, 0x57, 0xc4, 0xa3, 0xf2, 0xc5, 0x6c, 0x25, 0x09, 0x12, 0xd8, 0x96, + 0xc1, 0x80, 0x96, 0xc0, 0x47, 0xa5, 0x15, 0x33, 0x4a, 0x8b, 0x8b, 0x2b, 0xe5, 0x8b, 0xdb, 0x01, + 0x95, 0x53, 0x27, 0x27, 0xa8, 0x02, 0xe5, 0x77, 0x07, 0x3d, 0x53, 0x3c, 0xba, 0xbe, 0xd1, 0xf9, + 0x20, 0x27, 0xe7, 0xf8, 0x90, 0x4f, 0x51, 0x09, 0x3f, 0x03, 0xb4, 0x4b, 0xa8, 0xee, 0x8f, 0xe6, + 0x97, 0x4c, 0x57, 0x46, 0xeb, 0xba, 0x70, 0x3f, 0x81, 0x92, 0x75, 0x7f, 0x0d, 0x4b, 0x67, 0x2e, + 0x99, 0x38, 0x81, 0xec, 0xdc, 0x5a, 0x4b, 0x18, 0x60, 0x2b, 0x34, 0xc0, 0x56, 0xc7, 0xbb, 0xb6, + 0x24, 0x06, 0x1f, 0xc1, 0x5a, 0x8f, 0x69, 0xbf, 0xe3, 0xb0, 0x18, 0x6b, 0xf1, 0x3f, 0xb0, 0x7e, + 0x03, 0xeb, 0x29, 0x56, 0x29, 0x6e, 0x0d, 0xd4, 0x91, 0x3f, 0x97, 0x97, 0xa2, 0x5a, 0x62, 0x81, + 0x9f, 0xc3, 0xba, 0x4e, 0x26, 0x84, 0x92, 0xbb, 0x4a, 0x6e, 0xc1, 0x46, 0x1a, 0x98, 0x4b, 0xdc, + 0x87, 0xda, 0x80, 0xd8, 0x33, 0x76, 0xd3, 0x82, 0xf0, 0x7b, 0xa8, 0x07, 0x3c, 0x70, 0x3a, 0x13, + 0x91, 0xdc, 0x26, 0xd5, 0x82, 0x78, 0x32, 0xde, 0x87, 0x7a, 0xc8, 0x26, 0x4f, 0x7d, 0x05, 0xb5, + 0x88, 0x2e, 0x98, 0x4f, 0xf2, 0xd9, 0x56, 0x42, 0x36, 0x86, 0xc4, 0xef, 0x61, 0x63, 0x97, 0x50, + 0xde, 0xa5, 0xae, 0xef, 0x9d, 0xb9, 0xe7, 0xb1, 0xc1, 0x5d, 0xe1, 0xe3, 0x74, 0x3a, 0xe2, 0xf1, + 0x5c, 0xce, 0xaa, 0x7b, 0x43, 0x80, 0x4d, 0x58, 0x0f, 0x29, 0x99, 0x6f, 0x05, 0x11, 0xe3, 0x0e, + 0x08, 0xdc, 0x29, 0x33, 0xc1, 0xfc, 0xb9, 0x00, 0x37, 0x4a, 0xc7, 0x7b, 0x50, 0x09, 0xfb, 0xfc, + 0xff, 0xe6, 0x61, 0xfb, 0xcf, 0x65, 0x50, 0xb9, 0x2e, 0x64, 0xc1, 0x6a, 0xea, 0x9b, 0x85, 0x36, + 0x73, 0xbf, 0xac, 0xda, 0xa7, 0xf9, 0x9f, 0x3a, 0x5c, 0x40, 0x3f, 0x42, 0x25, 0xb4, 0x35, 0xb4, + 0x71, 0x4b, 0x87, 0xc1, 0x3e, 0xf7, 0xda, 0x83, 0x18, 0x4b, 0xdc, 0x69, 0x70, 0x01, 0xbd, 0x81, + 0x6a, 0xcc, 0xe0, 0x50, 0xca, 0x8d, 0x63, 0xa6, 0xa7, 0x65, 0x90, 0xe3, 0x02, 0xd2, 0x61, 0x25, + 0xee, 0x76, 0x48, 0x4b, 0x92, 0xc4, 0x2d, 
0x30, 0x87, 0xa5, 0x1b, 0x29, 0xc9, 0xad, 0x25, 0x45, + 0x9e, 0x2a, 0x67, 0x37, 0x92, 0xc2, 0x3d, 0x2e, 0x93, 0xe5, 0x71, 0x8e, 0x21, 0xe2, 0xc2, 0xb7, + 0x0a, 0x7a, 0x0b, 0xd5, 0x98, 0xbf, 0x44, 0x7d, 0xb9, 0xed, 0x4c, 0x91, 0xa0, 0x05, 0x76, 0x84, + 0x0b, 0xc8, 0x84, 0x5a, 0xc2, 0x0c, 0x50, 0x78, 0xf2, 0x22, 0xe3, 0xd1, 0x9e, 0x2c, 0xde, 0x0c, + 0xd9, 0x9a, 0x0a, 0x7a, 0x0f, 0xf5, 0xa4, 0x09, 0xa0, 0x30, 0x67, 0xa1, 0x89, 0x68, 0x9b, 0x19, + 0xbb, 0x31, 0xca, 0x97, 0xb0, 0x24, 0x5e, 0x36, 0x5a, 0x93, 0xe0, 0x84, 0x6d, 0x68, 0xeb, 0xa9, + 0x68, 0x54, 0x5b, 0x0f, 0xea, 0xc9, 0x57, 0x9c, 0xd9, 0xee, 0xcd, 0x9b, 0x1e, 0x2d, 0x78, 0xf4, + 0xfc, 0xde, 0x6a, 0x89, 0xd7, 0x9b, 0xc9, 0xf4, 0x24, 0xc5, 0x94, 0x78, 0xeb, 0xb8, 0x80, 0x7e, + 0x80, 0xca, 0xc0, 0xb3, 0xa7, 0xc1, 0xd8, 0xa7, 0x99, 0x1c, 0x99, 0x33, 0xf8, 0xa6, 0xf9, 0xcb, + 0x97, 0xe7, 0x2e, 0x1d, 0xcf, 0x87, 0xad, 0x91, 0x7f, 0xd9, 0xbe, 0xf4, 0x83, 0xf9, 0x85, 0xdd, + 0x1e, 0x4e, 0xec, 0x80, 0xb6, 0x93, 0x3f, 0xc3, 0x87, 0x4b, 0x7c, 0xfd, 0xe2, 0xdf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x05, 0xe3, 0xab, 0x2e, 0x9f, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -911,13 +1152,12 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type IndexClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) - GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) - SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) - WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) + NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) + NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) + ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) + ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) @@ -935,66 +1175,57 @@ func NewIndexClient(cc *grpc.ClientConn) IndexClient { return &indexClient{cc} } -func (c *indexClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { - out := 
new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Index/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Index/ReadinessProbe", in, out, opts...) +func (c *indexClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { + out := new(NodeHealthCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/NodeHealthCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { - out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetNode", in, out, opts...) +func (c *indexClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { + out := new(NodeInfoResponse) + err := c.cc.Invoke(ctx, "/index.Index/NodeInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/SetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/ClusterJoin", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *indexClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/DeleteNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/ClusterLeave", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { - out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetCluster", in, out, opts...) +func (c *indexClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { + out := new(ClusterInfoResponse) + err := c.cc.Invoke(ctx, "/index.Index/ClusterInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/WatchCluster", opts...) +func (c *indexClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/ClusterWatch", opts...) if err != nil { return nil, err } - x := &indexWatchClusterClient{stream} + x := &indexClusterWatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -1004,17 +1235,17 @@ func (c *indexClient) WatchCluster(ctx context.Context, in *empty.Empty, opts .. 
return x, nil } -type Index_WatchClusterClient interface { - Recv() (*GetClusterResponse, error) +type Index_ClusterWatchClient interface { + Recv() (*ClusterWatchResponse, error) grpc.ClientStream } -type indexWatchClusterClient struct { +type indexClusterWatchClient struct { grpc.ClientStream } -func (x *indexWatchClusterClient) Recv() (*GetClusterResponse, error) { - m := new(GetClusterResponse) +func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { + m := new(ClusterWatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -1136,13 +1367,12 @@ func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grp // IndexServer is the server API for Index service. type IndexServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) - GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) - SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) - DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) - GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) - WatchCluster(*empty.Empty, Index_WatchClusterServer) error + NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) + NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) + ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) + ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) + ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) + ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) IndexDocument(Index_IndexDocumentServer) error DeleteDocument(Index_DeleteDocumentServer) error @@ -1156,132 +1386,114 @@ func RegisterIndexServer(s *grpc.Server, srv IndexServer) { 
s.RegisterService(&_Index_serviceDesc, srv) } -func _Index_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) +func _Index_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeHealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).LivenessProbe(ctx, in) + return srv.(IndexServer).NodeHealthCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/LivenessProbe", + FullMethod: "/index.Index/NodeHealthCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).LivenessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).ReadinessProbe(ctx, in) + return srv.(IndexServer).NodeInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/ReadinessProbe", + FullMethod: "/index.Index/NodeInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ReadinessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).NodeInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Index_GetNode_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeRequest) +func _Index_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterJoinRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetNode(ctx, in) + return srv.(IndexServer).ClusterJoin(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetNode", + FullMethod: "/index.Index/ClusterJoin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetNode(ctx, req.(*GetNodeRequest)) + return srv.(IndexServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodeRequest) +func _Index_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterLeaveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).SetNode(ctx, in) + return srv.(IndexServer).ClusterLeave(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/SetNode", + FullMethod: "/index.Index/ClusterLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).SetNode(ctx, req.(*SetNodeRequest)) + return srv.(IndexServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(DeleteNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).DeleteNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/DeleteNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetCluster(ctx, in) + return srv.(IndexServer).ClusterInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetCluster", + FullMethod: "/index.Index/ClusterInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetCluster(ctx, req.(*empty.Empty)) + return srv.(IndexServer).ClusterInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Index_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Index_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(IndexServer).WatchCluster(m, &indexWatchClusterServer{stream}) + return srv.(IndexServer).ClusterWatch(m, &indexClusterWatchServer{stream}) } -type Index_WatchClusterServer interface { - Send(*GetClusterResponse) error +type Index_ClusterWatchServer interface { + Send(*ClusterWatchResponse) error grpc.ServerStream } -type indexWatchClusterServer struct { +type indexClusterWatchServer 
struct { grpc.ServerStream } -func (x *indexWatchClusterServer) Send(m *GetClusterResponse) error { +func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { return x.ServerStream.SendMsg(m) } @@ -1432,28 +1644,24 @@ var _Index_serviceDesc = grpc.ServiceDesc{ HandlerType: (*IndexServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "LivenessProbe", - Handler: _Index_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Index_ReadinessProbe_Handler, + MethodName: "NodeHealthCheck", + Handler: _Index_NodeHealthCheck_Handler, }, { - MethodName: "GetNode", - Handler: _Index_GetNode_Handler, + MethodName: "NodeInfo", + Handler: _Index_NodeInfo_Handler, }, { - MethodName: "SetNode", - Handler: _Index_SetNode_Handler, + MethodName: "ClusterJoin", + Handler: _Index_ClusterJoin_Handler, }, { - MethodName: "DeleteNode", - Handler: _Index_DeleteNode_Handler, + MethodName: "ClusterLeave", + Handler: _Index_ClusterLeave_Handler, }, { - MethodName: "GetCluster", - Handler: _Index_GetCluster_Handler, + MethodName: "ClusterInfo", + Handler: _Index_ClusterInfo_Handler, }, { MethodName: "GetDocument", @@ -1478,8 +1686,8 @@ var _Index_serviceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "WatchCluster", - Handler: _Index_WatchCluster_Handler, + StreamName: "ClusterWatch", + Handler: _Index_ClusterWatch_Handler, ServerStreams: true, }, { diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index 4629c22..bf354b0 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -22,14 +22,13 @@ package index; option go_package = "github.com/mosuka/blast/protobuf/index"; service Index { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} - 
rpc GetNode (GetNodeRequest) returns (GetNodeResponse) {} - rpc SetNode (SetNodeRequest) returns (google.protobuf.Empty) {} - rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} - rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} - rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} + rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} + rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} @@ -40,51 +39,76 @@ service Index { rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} } -// use for health check -message LivenessProbeResponse { +message NodeHealthCheckRequest { + enum Probe { + HEALTHINESS = 0; + LIVENESS = 1; + READINESS = 2; + } + Probe probe = 1; +} + +message NodeHealthCheckResponse { enum State { - UNKNOWN = 0; - ALIVE = 1; - DEAD = 2; + HEALTHY = 0; + UNHEALTHY = 1; + ALIVE = 2; + DEAD = 3; + READY = 4; + NOT_READY = 5; } State state = 1; } -// use for health check -message ReadinessProbeResponse { +message Metadata { + string grpc_address = 1; + string http_address = 2; +} + +message Node { enum State { UNKNOWN = 0; - READY = 1; - NOT_READY = 2; + FOLLOWER = 1; + CANDIDATE = 2; + LEADER = 3; + SHUTDOWN = 4; } - State state = 1; + string id = 1; + string bind_address = 2; + State state = 3; + Metadata metadata = 4; } -// use for raft cluster status -message GetNodeRequest { - string id = 1; +message Cluster { + map nodes = 1; } -// use for raft cluster status -message GetNodeResponse { - google.protobuf.Any nodeConfig = 1; - string state = 2; +message NodeInfoResponse { + Node node = 1; } -// use for raft cluster status 
-message SetNodeRequest { - string id = 1; - google.protobuf.Any nodeConfig = 2; +message ClusterJoinRequest { + Node node = 1; } -// use for raft cluster status -message DeleteNodeRequest { +message ClusterLeaveRequest { string id = 1; } -// use for raft cluster status -message GetClusterResponse { - google.protobuf.Any cluster = 1; +message ClusterInfoResponse { + Cluster cluster = 1; +} + +message ClusterWatchResponse { + enum Event { + UNKNOWN = 0; + JOIN = 1; + LEAVE = 2; + UPDATE = 3; + } + Event event = 1; + Node node = 2; + Cluster cluster = 3; } message GetDocumentRequest { From d0c69ef1cb17b63b6de141fd1cb7bb370870a0f4 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 5 Aug 2019 00:31:25 +0900 Subject: [PATCH 21/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 7dd4954..81442af 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change cluster watch command for manager #92 - Change node state to enum from string #93 - Change node info structure #94 +- Change protobuf for indexer and dispatcher #95 ## [v0.7.1] - 2019-07-18 From e3295be350fa75b1be92d5db746bb57507c506aa Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 5 Aug 2019 12:58:34 +0900 Subject: [PATCH 22/76] Change server arguments (#96) --- cmd/blast/dispatcher_start.go | 15 +------ config/cluster_config.go | 25 ------------ config/cluster_config_test.go | 76 ----------------------------------- config/node_config.go | 57 -------------------------- config/node_config_test.go | 54 ------------------------- dispatcher/grpc_service.go | 2 - dispatcher/server.go | 33 +++++++-------- dispatcher/server_test.go | 39 ++++++++---------- indexer/grpc_service.go | 3 +- indexer/raft_fsm.go | 7 +++- indexer/raft_server.go | 7 +++- manager/grpc_service.go | 16 ++++++-- manager/raft_server.go | 14 ++++++- testutils/testutils.go | 21 +++++----- 14 files changed, 
83 insertions(+), 286 deletions(-) delete mode 100644 config/cluster_config.go delete mode 100644 config/cluster_config_test.go delete mode 100644 config/node_config.go delete mode 100644 config/node_config_test.go diff --git a/cmd/blast/dispatcher_start.go b/cmd/blast/dispatcher_start.go index 9c9540f..534bea1 100644 --- a/cmd/blast/dispatcher_start.go +++ b/cmd/blast/dispatcher_start.go @@ -19,7 +19,6 @@ import ( "os/signal" "syscall" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/dispatcher" "github.com/mosuka/blast/logutils" "github.com/urfave/cli" @@ -80,19 +79,7 @@ func dispatcherStart(c *cli.Context) error { httpLogCompress, ) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if managerAddr != "" { - clusterConfig.ManagerAddr = managerAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - } - - svr, err := dispatcher.NewServer(clusterConfig, nodeConfig, logger, grpcLogger, httpAccessLogger) + svr, err := dispatcher.NewServer(managerAddr, grpcAddr, httpAddr, logger, grpcLogger, httpAccessLogger) if err != nil { return err } diff --git a/config/cluster_config.go b/config/cluster_config.go deleted file mode 100644 index fb56a5d..0000000 --- a/config/cluster_config.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -type ClusterConfig struct { - ManagerAddr string `json:"manager_addr,omitempty"` - ClusterId string `json:"cluster_id,omitempty"` - PeerAddr string `json:"peer_addr,omitempty"` -} - -func DefaultClusterConfig() *ClusterConfig { - return &ClusterConfig{} -} diff --git a/config/cluster_config_test.go b/config/cluster_config_test.go deleted file mode 100644 index 24d4c2e..0000000 --- a/config/cluster_config_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "reflect" - "testing" -) - -func TestDefaultClusterConfig(t *testing.T) { - exp := &ClusterConfig{} - act := DefaultClusterConfig() - if !reflect.DeepEqual(exp, act) { - t.Fatalf("expected content to see %v, saw %v", exp, act) - } - - expManagerAddr := "" - actManagerAddr := act.ManagerAddr - if expManagerAddr != actManagerAddr { - t.Fatalf("expected content to see %v, saw %v", expManagerAddr, actManagerAddr) - } - - expClusterId := "" - actClusterId := act.ClusterId - if expClusterId != actClusterId { - t.Fatalf("expected content to see %v, saw %v", expClusterId, actClusterId) - } - - expPeerAddr := "" - actPeerAddr := act.PeerAddr - if expPeerAddr != actPeerAddr { - t.Fatalf("expected content to see %v, saw %v", expPeerAddr, actPeerAddr) - } -} - -func TestClusterConfig_1(t *testing.T) { - expConfig := &ClusterConfig{ - ManagerAddr: ":12000", - ClusterId: "cluster1", - PeerAddr: ":5000", - } - actConfig := DefaultClusterConfig() - actConfig.ManagerAddr = ":12000" - actConfig.ClusterId = "cluster1" - actConfig.PeerAddr = ":5000" - - expManagerAddr := expConfig.ManagerAddr - actManagerAddr := actConfig.ManagerAddr - if expManagerAddr != actManagerAddr { - t.Fatalf("expected content to see %v, saw %v", expManagerAddr, actManagerAddr) - } - - expClusterId := expConfig.ClusterId - actClusterId := actConfig.ClusterId - if expClusterId != actClusterId { - t.Fatalf("expected content to see %v, saw %v", expClusterId, actClusterId) - } - - expPeerAddr := expConfig.PeerAddr - actPeerAddr := actConfig.PeerAddr - if expPeerAddr != actPeerAddr { - t.Fatalf("expected content to see %v, saw %v", expPeerAddr, actPeerAddr) - } -} diff --git a/config/node_config.go b/config/node_config.go deleted file mode 100644 index 7b0b61b..0000000 --- a/config/node_config.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "encoding/json" - "fmt" - - "github.com/mosuka/blast/strutils" -) - -type NodeConfig struct { - NodeId string `json:"node_id,omitempty"` - BindAddr string `json:"bind_addr,omitempty"` - GRPCAddr string `json:"grpc_addr,omitempty"` - HTTPAddr string `json:"http_addr,omitempty"` - DataDir string `json:"data_dir,omitempty"` - RaftStorageType string `json:"raft_storage_type,omitempty"` -} - -func DefaultNodeConfig() *NodeConfig { - return &NodeConfig{ - NodeId: fmt.Sprintf("node-%s", strutils.RandStr(5)), - BindAddr: ":2000", - GRPCAddr: ":5000", - HTTPAddr: ":8000", - DataDir: "/tmp/blast", - RaftStorageType: "boltdb", - } -} - -func (c *NodeConfig) ToMap() map[string]interface{} { - b, err := json.Marshal(c) - if err != nil { - return map[string]interface{}{} - } - - var m map[string]interface{} - err = json.Unmarshal(b, &m) - if err != nil { - return map[string]interface{}{} - } - - return m -} diff --git a/config/node_config_test.go b/config/node_config_test.go deleted file mode 100644 index da69880..0000000 --- a/config/node_config_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "fmt" - "testing" - - "github.com/mosuka/blast/strutils" -) - -func TestDefaultNodeConfig(t *testing.T) { - expConfig := &NodeConfig{ - NodeId: fmt.Sprintf("node-%s", strutils.RandStr(5)), - BindAddr: ":2000", - GRPCAddr: ":5000", - HTTPAddr: ":8000", - DataDir: "/tmp/blast", - RaftStorageType: "boltdb", - } - actConfig := DefaultNodeConfig() - - if expConfig.BindAddr != actConfig.BindAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.BindAddr, actConfig.BindAddr) - } - - if expConfig.GRPCAddr != actConfig.GRPCAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.GRPCAddr, actConfig.GRPCAddr) - } - - if expConfig.HTTPAddr != actConfig.HTTPAddr { - t.Fatalf("expected content to see %v, saw %v", expConfig.HTTPAddr, actConfig.HTTPAddr) - } - - if expConfig.DataDir != actConfig.DataDir { - t.Fatalf("expected content to see %v, saw %v", expConfig.DataDir, actConfig.DataDir) - } - - if expConfig.RaftStorageType != actConfig.RaftStorageType { - t.Fatalf("expected content to see %v, saw %v", expConfig.RaftStorageType, actConfig.RaftStorageType) - } -} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index f51c94c..9ebf9e0 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -51,7 +51,6 @@ type GRPCService struct { updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} - //indexers map[string]interface{} indexers map[string]*index.Cluster indexerClients map[string]map[string]*indexer.GRPCClient updateIndexersStopCh chan struct{} @@ -66,7 
+65,6 @@ func NewGRPCService(managerGrpcAddress string, logger *zap.Logger) (*GRPCService managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), - //indexers: make(map[string]interface{}, 0), indexers: make(map[string]*index.Cluster, 0), indexerClients: make(map[string]map[string]*indexer.GRPCClient, 0), }, nil diff --git a/dispatcher/server.go b/dispatcher/server.go index d088d51..447c51a 100644 --- a/dispatcher/server.go +++ b/dispatcher/server.go @@ -16,16 +16,16 @@ package dispatcher import ( accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" "go.uber.org/zap" ) type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger + managerGrpcAddress string + grpcAddress string + httpAddress string + logger *zap.Logger + grpcLogger *zap.Logger + httpLogger accesslog.Logger grpcService *GRPCService grpcServer *GRPCServer @@ -33,13 +33,14 @@ type Server struct { httpServer *HTTPServer } -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, grpcAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, + managerGrpcAddress: managerGrpcAddress, + grpcAddress: grpcAddress, + httpAddress: httpAddress, + logger: logger, + grpcLogger: grpcLogger, + httpLogger: httpLogger, }, nil } @@ -47,28 +48,28 @@ func (s *Server) Start() { var err error // create gRPC service - s.grpcService, err = NewGRPCService(s.clusterConfig.ManagerAddr, s.logger) + s.grpcService, err = NewGRPCService(s.managerGrpcAddress, 
s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create gRPC server - s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.grpcAddress, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) + s.httpRouter, err = NewRouter(s.grpcAddress, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP server - s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.httpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 29d0577..63922e1 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" @@ -534,27 +533,23 @@ func TestServer_Start(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } + //// + //// dispatcher + //// + //dispatcherManagerGrpcAddress := managerGrpcAddress1 + //dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + //dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) // - // dispatcher + //dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) + //defer func() { + // dispatcher1.Stop() + //}() + //if err != nil { + // t.Fatalf("%v", err) + //} + //// start server + //dispatcher1.Start() // - // create cluster config - dispatcherClusterConfig1 := config.DefaultClusterConfig() - dispatcherClusterConfig1.ManagerAddr = managerGrpcAddress1 - // create 
node config - dispatcherNodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(dispatcherNodeConfig.DataDir) - }() - dispatcher1, err := NewServer(dispatcherClusterConfig1, dispatcherNodeConfig, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) - defer func() { - dispatcher1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - // start server - dispatcher1.Start() - - // sleep - time.Sleep(5 * time.Second) + //// sleep + //time.Sleep(5 * time.Second) } diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 18007ff..87b9ba3 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -763,11 +763,12 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentReq fields, err := s.raftServer.GetDocument(req.Id) if err != nil { - s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("id", req.Id)) return resp, status.Error(codes.NotFound, err.Error()) default: + s.logger.Error(err.Error(), zap.String("id", req.Id)) return resp, status.Error(codes.Internal, err.Error()) } } diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 3d6bfc9..01d047e 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -113,7 +113,12 @@ func (f *RaftFSM) DeleteNode(nodeId string) error { func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { fields, err := f.index.Get(id) if err != nil { - f.logger.Error(err.Error()) + switch err { + case blasterrors.ErrNotFound: + f.logger.Debug(err.Error(), zap.String("id", id)) + default: + f.logger.Error(err.Error(), zap.String("id", id)) + } return nil, err } diff --git a/indexer/raft_server.go b/indexer/raft_server.go index c2fa628..60a9004 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -526,7 +526,12 @@ func (s *RaftServer) Snapshot() error { func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { fields, err := 
s.fsm.GetDocument(id) if err != nil { - s.logger.Error(err.Error()) + switch err { + case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("id", id)) + default: + s.logger.Error(err.Error(), zap.String("id", id)) + } return nil, err } diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 47b65a2..6bccc35 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -504,11 +504,12 @@ func (s *GRPCService) Get(ctx context.Context, req *management.GetRequest) (*man value, err := s.raftServer.GetValue(req.Key) if err != nil { - s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("key", req.Key)) return resp, status.Error(codes.NotFound, err.Error()) default: + s.logger.Error(err.Error(), zap.String("key", req.Key)) return resp, status.Error(codes.Internal, err.Error()) } } @@ -587,11 +588,12 @@ func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) if s.raftServer.IsLeader() { err := s.raftServer.DeleteValue(req.Key) if err != nil { - s.logger.Error(err.Error()) switch err { case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("key", req.Key)) return resp, status.Error(codes.NotFound, err.Error()) default: + s.logger.Error(err.Error(), zap.String("key", req.Key)) return resp, status.Error(codes.Internal, err.Error()) } } @@ -604,8 +606,14 @@ func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) } err = client.Delete(req.Key) if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) + switch err { + case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("key", req.Key)) + return resp, status.Error(codes.NotFound, err.Error()) + default: + s.logger.Error(err.Error(), zap.String("key", req.Key)) + return resp, status.Error(codes.Internal, err.Error()) + } } } diff --git a/manager/raft_server.go b/manager/raft_server.go index 76e3324..5a75f47 
100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -534,7 +534,12 @@ func (s *RaftServer) Snapshot() error { func (s *RaftServer) GetValue(key string) (interface{}, error) { value, err := s.fsm.GetValue(key) if err != nil { - s.logger.Error(err.Error()) + switch err { + case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("key", key)) + default: + s.logger.Error(err.Error(), zap.String("key", key)) + } return nil, err } @@ -611,7 +616,12 @@ func (s *RaftServer) DeleteValue(key string) error { } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + switch err { + case blasterrors.ErrNotFound: + s.logger.Debug(err.Error(), zap.String("key", key)) + default: + s.logger.Error(err.Error(), zap.String("key", key)) + } return err } diff --git a/testutils/testutils.go b/testutils/testutils.go index ecae708..758e804 100644 --- a/testutils/testutils.go +++ b/testutils/testutils.go @@ -15,7 +15,6 @@ package testutils import ( - "fmt" "io/ioutil" "net" @@ -46,16 +45,16 @@ func TmpPort() int { return l.Addr().(*net.TCPAddr).Port } -func TmpNodeConfig() *config.NodeConfig { - c := config.DefaultNodeConfig() - - c.BindAddr = fmt.Sprintf(":%d", TmpPort()) - c.GRPCAddr = fmt.Sprintf(":%d", TmpPort()) - c.HTTPAddr = fmt.Sprintf(":%d", TmpPort()) - c.DataDir = TmpDir() - - return c -} +//func TmpNodeConfig() *config.NodeConfig { +// c := config.DefaultNodeConfig() +// +// c.BindAddr = fmt.Sprintf(":%d", TmpPort()) +// c.GRPCAddr = fmt.Sprintf(":%d", TmpPort()) +// c.HTTPAddr = fmt.Sprintf(":%d", TmpPort()) +// c.DataDir = TmpDir() +// +// return c +//} func TmpIndexConfig(indexMappingFile string, indexType string, indexStorageType string) (*config.IndexConfig, error) { indexMapping, err := indexutils.NewIndexMappingFromFile(indexMappingFile) From ea1e45d67c45ddc70dd1e0b40e14b8d18a889064 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 5 Aug 2019 12:59:26 +0900 Subject: [PATCH 23/76] Update CHANGES.md --- 
CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 81442af..3cc4182 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -22,6 +22,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change node state to enum from string #93 - Change node info structure #94 - Change protobuf for indexer and dispatcher #95 +- Change server arguments #96 ## [v0.7.1] - 2019-07-18 From 11881db1146627f1012b033b5549ef6027757e98 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 09:29:40 +0900 Subject: [PATCH 24/76] Change index protobuf (#97) --- cmd/blast/indexer_start.go | 10 +- cmd/blast/manager_start.go | 10 +- config/index_config.go | 66 ------------ config/index_config_test.go | 36 ------- dispatcher/server_test.go | 56 ++++++---- indexer/grpc_client.go | 13 +-- indexer/grpc_service.go | 27 +++-- indexer/index.go | 26 +++-- indexer/raft_fsm.go | 23 ++-- indexer/raft_server.go | 34 +++--- indexer/server.go | 47 +++++--- indexer/server_test.go | 174 +++++++++++++++++++----------- manager/grpc_server.go | 16 +++ manager/http_server.go | 16 +++ manager/raft_server.go | 69 ++++++++---- manager/server.go | 64 +++++++---- manager/server_test.go | 169 +++++++++++++++++++---------- protobuf/index/index.pb.go | 208 +++++++++++++++++++++++------------- protobuf/index/index.proto | 8 +- 19 files changed, 641 insertions(+), 431 deletions(-) delete mode 100644 config/index_config.go delete mode 100644 config/index_config_test.go diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go index 0afd811..b20e689 100644 --- a/cmd/blast/indexer_start.go +++ b/cmd/blast/indexer_start.go @@ -20,7 +20,6 @@ import ( "syscall" "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" @@ -117,14 +116,7 @@ func indexerStart(c *cli.Context) error { indexMapping = mapping.NewIndexMapping() } - // create index 
config - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) + svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) if err != nil { return err } diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go index ee2c93c..81385ab 100644 --- a/cmd/blast/manager_start.go +++ b/cmd/blast/manager_start.go @@ -20,7 +20,6 @@ import ( "syscall" "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" @@ -115,14 +114,7 @@ func managerStart(c *cli.Context) error { indexMapping = mapping.NewIndexMapping() } - // create index config - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) + svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) if err != nil { return err } diff --git a/config/index_config.go b/config/index_config.go deleted file mode 100644 index 89fd4e2..0000000 --- a/config/index_config.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "encoding/json" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" -) - -type IndexConfig struct { - IndexMapping *mapping.IndexMappingImpl `json:"index_mapping,omitempty"` - IndexType string `json:"index_type,omitempty"` - IndexStorageType string `json:"index_storage_type,omitempty"` -} - -func DefaultIndexConfig() *IndexConfig { - return &IndexConfig{ - IndexMapping: mapping.NewIndexMapping(), - IndexType: bleve.Config.DefaultIndexType, - IndexStorageType: bleve.Config.DefaultKVStore, - } -} - -func NewIndexConfigFromMap(src map[string]interface{}) *IndexConfig { - b, err := json.Marshal(src) - if err != nil { - return &IndexConfig{} - } - - var indexConfig *IndexConfig - err = json.Unmarshal(b, &indexConfig) - if err != nil { - return &IndexConfig{} - } - - return indexConfig -} - -func (c *IndexConfig) ToMap() map[string]interface{} { - b, err := json.Marshal(c) - if err != nil { - return map[string]interface{}{} - } - - var m map[string]interface{} - err = json.Unmarshal(b, &m) - if err != nil { - return map[string]interface{}{} - } - - return m -} diff --git a/config/index_config_test.go b/config/index_config_test.go deleted file mode 100644 index 674766c..0000000 --- a/config/index_config_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "reflect" - "testing" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" -) - -func TestDefaultIndexConfig(t *testing.T) { - expConfig := &IndexConfig{ - IndexMapping: mapping.NewIndexMapping(), - IndexType: bleve.Config.DefaultIndexType, - IndexStorageType: bleve.Config.DefaultKVStore, - } - actConfig := DefaultIndexConfig() - - if !reflect.DeepEqual(expConfig, actConfig) { - t.Fatalf("expected content to see %v, saw %v", expConfig, actConfig) - } -} diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 63922e1..5500ed2 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" @@ -56,13 +58,15 @@ func TestServer_Start(t *testing.T) { }, } - managerIndexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + managerIndexType1 := "upside_down" + managerIndexStorageType1 := "boltdb" // create server - managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger) + managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, 
managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexMapping1, managerIndexType1, managerIndexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer1 != nil { managerServer1.Stop() @@ -93,13 +97,15 @@ func TestServer_Start(t *testing.T) { }, } - managerIndexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + managerIndexType2 := "upside_down" + managerIndexStorageType2 := "boltdb" // create server - managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger) + managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexMapping2, managerIndexType2, managerIndexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer2 != nil { managerServer2.Stop() @@ -130,13 +136,15 @@ func TestServer_Start(t *testing.T) { }, } - managerIndexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + managerIndexType3 := "upside_down" + managerIndexStorageType3 := "boltdb" // create server - managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger) + managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexMapping3, managerIndexType3, 
managerIndexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if managerServer3 != nil { managerServer3.Stop() @@ -226,11 +234,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress1, }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + indexerIndexType1 := "upside_down" + indexerIndexStorageType1 := "boltdb" + indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexerIndexMapping1, indexerIndexType1, indexerIndexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer1.Stop() }() @@ -264,11 +274,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress2, }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + indexerIndexType2 := "upside_down" + indexerIndexStorageType2 := "boltdb" + indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, 
indexerRaftStorageType2, indexerIndexMapping2, indexerIndexType2, indexerIndexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer2.Stop() }() @@ -302,11 +314,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress3, }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + indexerIndexType3 := "upside_down" + indexerIndexStorageType3 := "boltdb" + indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexerIndexMapping3, indexerIndexType3, indexerIndexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer3.Stop() }() @@ -392,11 +406,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress4, }, } - indexConfig4, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping4, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexConfig4, logger, grpcLogger, httpAccessLogger) + indexerIndexType4 := "upside_down" + indexerIndexStorageType4 := "boltdb" + indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, 
indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexerIndexMapping4, indexerIndexType4, indexerIndexStorageType4, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer4.Stop() }() @@ -430,11 +446,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress5, }, } - indexConfig5, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping5, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexConfig5, logger, grpcLogger, httpAccessLogger) + indexerIndexType5 := "upside_down" + indexerIndexStorageType5 := "boltdb" + indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexerIndexMapping5, indexerIndexType5, indexerIndexStorageType5, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer5.Stop() }() @@ -468,11 +486,13 @@ func TestServer_Start(t *testing.T) { HttpAddress: indexerHttpAddress6, }, } - indexConfig6, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexerIndexMapping6, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } - indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexConfig6, logger, grpcLogger, httpAccessLogger) + indexerIndexType6 := "upside_down" + indexerIndexStorageType6 := "boltdb" + indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, 
indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexerIndexMapping6, indexerIndexType6, indexerIndexStorageType6, logger, grpcLogger, httpAccessLogger) defer func() { indexerServer6.Stop() }() diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index e955e3b..4759e19 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -306,17 +306,20 @@ func (c *GRPCClient) GetIndexConfig(opts ...grpc.CallOption) (map[string]interfa resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) - return nil, errors.New(st.Message()) } - indexConfigIntr, err := protobuf.MarshalAny(resp.IndexConfig) + indexMapping, err := protobuf.MarshalAny(resp.IndexConfig.IndexMapping) if err != nil { st, _ := status.FromError(err) - return nil, errors.New(st.Message()) } - indexConfig := *indexConfigIntr.(*map[string]interface{}) + + indexConfig := map[string]interface{}{ + "index_mapping": indexMapping, + "index_type": resp.IndexConfig.IndexType, + "index_storage_type": resp.IndexConfig.IndexStorageType, + } return indexConfig, nil } @@ -325,14 +328,12 @@ func (c *GRPCClient) GetIndexStats(opts ...grpc.CallOption) (map[string]interfac resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) 
if err != nil { st, _ := status.FromError(err) - return nil, errors.New(st.Message()) } indexStatsIntr, err := protobuf.MarshalAny(resp.IndexStats) if err != nil { st, _ := status.FromError(err) - return nil, errors.New(st.Message()) } indexStats := *indexStatsIntr.(*map[string]interface{}) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 87b9ba3..2613c56 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" @@ -922,7 +924,9 @@ func (s *GRPCService) DeleteDocument(stream index.Index_DeleteDocumentServer) er } func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { - resp := &index.GetIndexConfigResponse{} + resp := &index.GetIndexConfigResponse{ + IndexConfig: &index.IndexConfig{}, + } indexConfig, err := s.raftServer.GetIndexConfig() if err != nil { @@ -930,14 +934,23 @@ func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*in return resp, status.Error(codes.Internal, err.Error()) } - indexConfigAny := &any.Any{} - err = protobuf.UnmarshalAny(indexConfig, indexConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) + if indexMapping, ok := indexConfig["index_mapping"]; ok { + indexMappingAny := &any.Any{} + err = protobuf.UnmarshalAny(indexMapping.(*mapping.IndexMappingImpl), indexMappingAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + resp.IndexConfig.IndexMapping = indexMappingAny } - resp.IndexConfig = indexConfigAny + if indexType, ok := indexConfig["index_type"]; ok { + resp.IndexConfig.IndexType = indexType.(string) + } + + if indexStorageType, ok := indexConfig["index_storage_type"]; ok { + resp.IndexConfig.IndexStorageType 
= indexStorageType.(string) + } return resp, nil } diff --git a/indexer/index.go b/indexer/index.go index 986c09b..accc520 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -21,8 +21,8 @@ import ( "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/document" + "github.com/blevesearch/bleve/mapping" "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" @@ -30,20 +30,22 @@ import ( ) type Index struct { - indexConfig *config.IndexConfig - logger *zap.Logger + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string + logger *zap.Logger index bleve.Index } -func NewIndex(dir string, indexConfig *config.IndexConfig, logger *zap.Logger) (*Index, error) { +func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*Index, error) { //bleve.SetLog(logger) var index bleve.Index _, err := os.Stat(dir) if os.IsNotExist(err) { // create new index - index, err = bleve.NewUsing(dir, indexConfig.IndexMapping, indexConfig.IndexType, indexConfig.IndexStorageType, nil) + index, err = bleve.NewUsing(dir, indexMapping, indexType, indexStorageType, nil) if err != nil { logger.Error(err.Error()) return nil, err @@ -61,9 +63,11 @@ func NewIndex(dir string, indexConfig *config.IndexConfig, logger *zap.Logger) ( } return &Index{ - index: index, - indexConfig: indexConfig, - logger: logger, + index: index, + indexMapping: indexMapping, + indexType: indexType, + indexStorageType: indexStorageType, + logger: logger, }, nil } @@ -210,7 +214,11 @@ func (i *Index) BulkDelete(ids []string) (int, error) { } func (i *Index) Config() (map[string]interface{}, error) { - return i.indexConfig.ToMap(), nil + return map[string]interface{}{ + "index_mapping": i.indexMapping, + "index_type": i.indexType, + "index_storage_type": i.indexStorageType, + }, nil } func (i 
*Index) Stats() (map[string]interface{}, error) { diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 01d047e..a12c541 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -21,10 +21,11 @@ import ( "io/ioutil" "sync" + "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" @@ -32,9 +33,11 @@ import ( ) type RaftFSM struct { - path string - indexConfig *config.IndexConfig - logger *zap.Logger + path string + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string + logger *zap.Logger cluster *index.Cluster clusterMutex sync.RWMutex @@ -42,11 +45,13 @@ type RaftFSM struct { index *Index } -func NewRaftFSM(path string, indexConfig *config.IndexConfig, logger *zap.Logger) (*RaftFSM, error) { +func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*RaftFSM, error) { return &RaftFSM{ - path: path, - indexConfig: indexConfig, - logger: logger, + path: path, + indexMapping: indexMapping, + indexType: indexType, + indexStorageType: indexStorageType, + logger: logger, }, nil } @@ -56,7 +61,7 @@ func (f *RaftFSM) Start() error { f.logger.Info("initialize index") var err error - f.index, err = NewIndex(f.path, f.indexConfig, f.logger) + f.index, err = NewIndex(f.path, f.indexMapping, f.indexType, f.indexStorageType, f.logger) if err != nil { f.logger.Error(err.Error()) return err diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 60a9004..c3ccf73 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -24,11 +24,11 @@ import ( "time" "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" raftbadgerdb 
"github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" - "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf/index" @@ -37,25 +37,29 @@ import ( ) type RaftServer struct { - node *index.Node - dataDir string - raftStorageType string - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger + node *index.Node + dataDir string + raftStorageType string + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string + bootstrap bool + logger *zap.Logger raft *raft.Raft fsm *RaftFSM } -func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexMapping: indexMapping, + indexType: indexType, + indexStorageType: indexStorageType, + bootstrap: bootstrap, + logger: logger, }, nil } @@ -64,7 +68,7 @@ func (s *RaftServer) Start() error { fsmPath := filepath.Join(s.dataDir, "index") s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.indexConfig, s.logger) + s.fsm, err = NewRaftFSM(fsmPath, s.indexMapping, s.indexType, s.indexStorageType, s.logger) if err != nil { s.logger.Fatal(err.Error()) return err diff --git a/indexer/server.go b/indexer/server.go index 1ffa188..d6b8bc2 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -18,8 +18,11 @@ import ( "encoding/json" "fmt" + 
"github.com/mosuka/blast/indexutils" + + "github.com/blevesearch/bleve/mapping" + accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf/index" @@ -33,7 +36,9 @@ type Server struct { node *index.Node dataDir string raftStorageType string - indexConfig *config.IndexConfig + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string logger *zap.Logger grpcLogger *zap.Logger httpLogger accesslog.Logger @@ -45,7 +50,7 @@ type Server struct { httpServer *HTTPServer } -func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ managerGrpcAddress: managerGrpcAddress, shardId: shardId, @@ -53,7 +58,9 @@ func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string node: node, dataDir: dataDir, raftStorageType: raftStorageType, - indexConfig: indexConfig, + indexMapping: indexMapping, + indexType: indexType, + indexStorageType: indexStorageType, logger: logger, grpcLogger: grpcLogger, httpLogger: httpLogger, @@ -126,16 +133,32 @@ func (s *Server) Start() { s.logger.Fatal(err.Error()) return } - s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) value, err := mc.Get("/index_config") if err != nil { s.logger.Fatal(err.Error()) return } - - if value != nil { - s.indexConfig = config.NewIndexConfigFromMap(*value.(*map[string]interface{})) + 
indexMappingSrc, ok := (*value.(*map[string]interface{}))["index_mapping"] + if ok { + b, err := json.Marshal(indexMappingSrc) + if err != nil { + s.logger.Fatal(err.Error()) + return + } + s.indexMapping, err = indexutils.NewIndexMappingFromBytes(b) + if err != nil { + s.logger.Fatal(err.Error()) + return + } + } + indexTypeSrc, ok := (*value.(*map[string]interface{}))["index_type"] + if ok { + s.indexType = indexTypeSrc.(string) + } + indexStorageTypeSrc, ok := (*value.(*map[string]interface{}))["index_storage_type"] + if ok { + s.indexStorageType = indexStorageTypeSrc.(string) } } else if s.peerGrpcAddress != "" { pc, err := NewGRPCClient(s.peerGrpcAddress) @@ -159,9 +182,9 @@ func (s *Server) Start() { return } - if value != nil { - s.indexConfig = config.NewIndexConfigFromMap(value) - } + s.indexMapping = value["index_mapping"].(*mapping.IndexMappingImpl) + s.indexType = value["index_type"].(string) + s.indexStorageType = value["index_storage_type"].(string) } // bootstrap node? @@ -171,7 +194,7 @@ func (s *Server) Start() { var err error // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/indexer/server_test.go b/indexer/server_test.go index b527382..0cd7957 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -24,13 +24,13 @@ import ( "testing" "time" - "github.com/mosuka/blast/strutils" - "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" ) @@ -64,12 +64,14 @@ func TestServer_Start(t *testing.T) { }, } - indexConfig, err := 
testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -114,12 +116,14 @@ func TestServer_LivenessProbe(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -211,12 +215,14 @@ func TestServer_GetNode(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, 
dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -294,12 +300,14 @@ func TestServer_GetCluster(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -381,12 +389,14 @@ func TestServer_GetIndexMapping(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -414,23 +424,29 @@ func TestServer_GetIndexMapping(t *testing.T) { t.Fatalf("%v", err) } - 
expIndexMapping := indexConfig.IndexMapping + expIndexMapping := indexMapping + + actIndexConfigMap, err := client.GetIndexConfig() if err != nil { t.Fatalf("%v", err) } - actIndexConfigMap, err := client.GetIndexConfig() + actIndexMapping := actIndexConfigMap["index_mapping"].(*mapping.IndexMappingImpl) if err != nil { t.Fatalf("%v", err) } - actIndexMapping, err := indexutils.NewIndexMappingFromMap(actIndexConfigMap["index_mapping"].(map[string]interface{})) + exp, err := json.Marshal(expIndexMapping) + if err != nil { + t.Fatalf("%v", err) + } + act, err := json.Marshal(actIndexMapping) if err != nil { t.Fatalf("%v", err) } - if !reflect.DeepEqual(expIndexMapping, actIndexMapping) { - t.Fatalf("expected content to see %v, saw %v", expIndexMapping, actIndexMapping) + if !reflect.DeepEqual(exp, act) { + t.Fatalf("expected content to see %v, saw %v", exp, act) } } @@ -464,12 +480,14 @@ func TestServer_GetIndexType(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -497,10 +515,7 @@ func TestServer_GetIndexType(t *testing.T) { t.Fatalf("%v", err) } - expIndexType := indexConfig.IndexType - if err != nil { - t.Fatalf("%v", err) - } + expIndexType := indexType actIndexConfigMap, err := client.GetIndexConfig() if err != nil { @@ -544,12 +559,14 @@ func TestServer_GetIndexStorageType(t 
*testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -577,10 +594,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { t.Fatalf("%v", err) } - expIndexStorageType := indexConfig.IndexStorageType - if err != nil { - t.Fatalf("%v", err) - } + expIndexStorageType := indexStorageType actIndexConfigMap, err := client.GetIndexConfig() if err != nil { @@ -624,12 +638,14 @@ func TestServer_GetIndexStats(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -713,12 +729,14 @@ func TestServer_PutDocument(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, 
"../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -814,12 +832,14 @@ func TestServer_GetDocument(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -930,12 +950,14 @@ func TestServer_DeleteDocument(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, 
logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -1075,12 +1097,14 @@ func TestServer_Search(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -1208,12 +1232,14 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { server1.Stop() }() @@ -1247,12 +1273,14 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig2, err := 
testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { server2.Stop() }() @@ -1286,12 +1314,14 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { server3.Stop() }() @@ -1336,12 +1366,14 @@ func TestCluster_LivenessProbe(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" - server1, err := 
NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { server1.Stop() }() @@ -1375,12 +1407,14 @@ func TestCluster_LivenessProbe(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { server2.Stop() }() @@ -1414,12 +1448,14 @@ func TestCluster_LivenessProbe(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer 
func() { server3.Stop() }() @@ -1586,12 +1622,14 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { server1.Stop() }() @@ -1625,12 +1663,14 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { server2.Stop() }() @@ -1664,12 +1704,14 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != 
nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { server3.Stop() }() @@ -1792,12 +1834,14 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { server1.Stop() }() @@ -1831,12 +1875,14 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, 
raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { server2.Stop() }() @@ -1870,12 +1916,14 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { server3.Stop() }() diff --git a/manager/grpc_server.go b/manager/grpc_server.go index e49645b..453e240 100644 --- a/manager/grpc_server.go +++ b/manager/grpc_server.go @@ -15,6 +15,7 @@ package manager import ( + "fmt" "net" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" @@ -92,3 +93,18 @@ func (s *GRPCServer) Stop() error { return nil } + +func (s *GRPCServer) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/manager/http_server.go b/manager/http_server.go index 33bd0fc..f64b7c6 100644 --- a/manager/http_server.go +++ b/manager/http_server.go @@ -15,6 +15,7 @@ package manager import ( + "fmt" "net" "net/http" @@ -67,3 +68,18 @@ func (s *HTTPServer) Stop() error { return nil } + +func (s *HTTPServer) GetAddress() (string, error) { + tcpAddr, err := 
net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/manager/raft_server.go b/manager/raft_server.go index 5a75f47..46bc079 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -24,11 +24,11 @@ import ( "sync" "time" + "github.com/blevesearch/bleve/mapping" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" raftbadgerdb "github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" - "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" @@ -36,26 +36,31 @@ import ( ) type RaftServer struct { - node *management.Node - dataDir string - raftStorageType string - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger - - raft *raft.Raft - fsm *RaftFSM - mu sync.RWMutex + node *management.Node + dataDir string + raftStorageType string + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string + bootstrap bool + logger *zap.Logger + + transport *raft.NetworkTransport + raft *raft.Raft + fsm *RaftFSM + mu sync.RWMutex } -func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexMapping: indexMapping, + indexType: 
indexType, + indexStorageType: indexStorageType, + bootstrap: bootstrap, + logger: logger, }, nil } @@ -94,7 +99,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) + s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err @@ -182,7 +187,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, transport) + s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) if err != nil { s.logger.Fatal(err.Error()) return err @@ -194,7 +199,7 @@ func (s *RaftServer) Start() error { Servers: []raft.Server{ { ID: raftConfig.LocalID, - Address: transport.LocalAddr(), + Address: s.transport.LocalAddr(), }, }, } @@ -217,11 +222,27 @@ func (s *RaftServer) Start() error { // set index config s.logger.Info("register index config") - err := s.SetValue("index_config", s.indexConfig.ToMap()) + b, err := json.Marshal(s.indexMapping) if err != nil { s.logger.Error(err.Error()) return err } + var indexMappingMap map[string]interface{} + err = json.Unmarshal(b, &indexMappingMap) + if err != nil { + s.logger.Error(err.Error()) + return err + } + indexConfig := map[string]interface{}{ + "index_mapping": indexMappingMap, + "index_type": s.indexType, + "index_storage_type": s.indexStorageType, + } + err = s.SetValue("index_config", indexConfig) + if err != nil { + s.logger.Error(err.Error(), zap.String("key", "index_config")) + return err + } } return nil @@ -292,6 +313,10 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", blasterrors.ErrNotFoundLeader } +func (s *RaftServer) NodeAddress() string { + return 
string(s.transport.LocalAddr()) +} + func (s *RaftServer) NodeID() string { return s.node.Id } diff --git a/manager/server.go b/manager/server.go index df4bba7..809d3c4 100644 --- a/manager/server.go +++ b/manager/server.go @@ -15,21 +15,23 @@ package manager import ( + "github.com/blevesearch/bleve/mapping" accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" ) type Server struct { - peerGrpcAddr string - node *management.Node - dataDir string - raftStorageType string - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger + peerGrpcAddr string + node *management.Node + dataDir string + raftStorageType string + indexMapping *mapping.IndexMappingImpl + indexType string + indexStorageType string + logger *zap.Logger + grpcLogger *zap.Logger + httpLogger accesslog.Logger raftServer *RaftServer grpcService *GRPCService @@ -38,16 +40,18 @@ type Server struct { httpServer *HTTPServer } -func NewServer(peerGrpcAddr string, node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(peerGrpcAddr string, node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ - peerGrpcAddr: peerGrpcAddr, - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, + peerGrpcAddr: peerGrpcAddr, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexMapping: indexMapping, + indexType: indexType, + indexStorageType: indexStorageType, + logger: logger, + grpcLogger: grpcLogger, + httpLogger: 
httpLogger, }, nil } @@ -59,7 +63,7 @@ func (s *Server) Start() { s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -179,3 +183,25 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } } + +func (s *Server) BindAddress() string { + return s.raftServer.NodeAddress() +} + +func (s *Server) GrpcAddress() string { + address, err := s.grpcServer.GetAddress() + if err != nil { + return "" + } + + return address +} + +func (s *Server) HttpAddress() string { + address, err := s.httpServer.GetAddress() + if err != nil { + return "" + } + + return address +} diff --git a/manager/server_test.go b/manager/server_test.go index b61bdae..855a0f1 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -23,6 +23,7 @@ import ( "time" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/strutils" @@ -57,13 +58,15 @@ func TestServer_Start(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) 
defer func() { if server != nil { server.Stop() @@ -108,13 +111,15 @@ func TestServer_HealthCheck(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -206,13 +211,15 @@ func TestServer_GetNode(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -290,13 +297,15 @@ func TestServer_GetCluster(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := 
"boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -378,13 +387,15 @@ func TestServer_SetState(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -463,13 +474,15 @@ func TestServer_GetState(t *testing.T) { }, } - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -548,13 +561,15 @@ func TestServer_DeleteState(t *testing.T) { }, } - indexConfig, err := 
testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType := "upside_down" + indexStorageType := "boltdb" // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) defer func() { if server != nil { server.Stop() @@ -654,13 +669,15 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -694,13 +711,15 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, 
logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -734,13 +753,15 @@ func TestCluster_Start(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -785,13 +806,15 @@ func TestCluster_HealthCheck(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -825,13 +848,15 @@ func TestCluster_HealthCheck(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, 
"../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -865,13 +890,15 @@ func TestCluster_HealthCheck(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1038,13 +1065,15 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, 
httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1078,13 +1107,15 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1118,13 +1149,15 @@ func TestCluster_GetNode(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1247,13 +1280,15 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), 
"upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1287,13 +1322,15 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1327,13 +1364,15 @@ func TestCluster_GetCluster(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := 
NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1522,13 +1561,15 @@ func TestCluster_SetState(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1562,13 +1603,15 @@ func TestCluster_SetState(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1602,13 +1645,15 @@ func TestCluster_SetState(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, 
err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -1781,13 +1826,15 @@ func TestCluster_GetState(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -1821,13 +1868,15 @@ func TestCluster_GetState(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, 
raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -1861,13 +1910,15 @@ func TestCluster_GetState(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() @@ -2040,13 +2091,15 @@ func TestCluster_DeleteState(t *testing.T) { }, } - indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType1 := "upside_down" + indexStorageType1 := "boltdb" // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) + server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) defer func() { if server1 != nil { server1.Stop() @@ -2080,13 +2133,15 @@ func TestCluster_DeleteState(t *testing.T) { }, } - indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping2, err := 
indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType2 := "upside_down" + indexStorageType2 := "boltdb" // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) + server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) defer func() { if server2 != nil { server2.Stop() @@ -2120,13 +2175,15 @@ func TestCluster_DeleteState(t *testing.T) { }, } - indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) if err != nil { t.Fatalf("%v", err) } + indexType3 := "upside_down" + indexStorageType3 := "boltdb" // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) + server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) defer func() { if server3 != nil { server3.Stop() diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index 8b42e8c..56cf8d5 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -912,18 +912,73 @@ func (m *SearchResponse) GetSearchResult() *any.Any { return nil } -type GetIndexConfigResponse struct { - IndexConfig *any.Any `protobuf:"bytes,1,opt,name=index_config,json=indexConfig,proto3" json:"index_config,omitempty"` +type IndexConfig struct { + IndexMapping *any.Any `protobuf:"bytes,1,opt,name=index_mapping,json=indexMapping,proto3" json:"index_mapping,omitempty"` + IndexType string `protobuf:"bytes,2,opt,name=index_type,json=indexType,proto3" 
json:"index_type,omitempty"` + IndexStorageType string `protobuf:"bytes,3,opt,name=index_storage_type,json=indexStorageType,proto3" json:"index_storage_type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } +func (m *IndexConfig) Reset() { *m = IndexConfig{} } +func (m *IndexConfig) String() string { return proto.CompactTextString(m) } +func (*IndexConfig) ProtoMessage() {} +func (*IndexConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{18} +} + +func (m *IndexConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexConfig.Unmarshal(m, b) +} +func (m *IndexConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexConfig.Marshal(b, m, deterministic) +} +func (m *IndexConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexConfig.Merge(m, src) +} +func (m *IndexConfig) XXX_Size() int { + return xxx_messageInfo_IndexConfig.Size(m) +} +func (m *IndexConfig) XXX_DiscardUnknown() { + xxx_messageInfo_IndexConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexConfig proto.InternalMessageInfo + +func (m *IndexConfig) GetIndexMapping() *any.Any { + if m != nil { + return m.IndexMapping + } + return nil +} + +func (m *IndexConfig) GetIndexType() string { + if m != nil { + return m.IndexType + } + return "" +} + +func (m *IndexConfig) GetIndexStorageType() string { + if m != nil { + return m.IndexStorageType + } + return "" +} + +type GetIndexConfigResponse struct { + IndexConfig *IndexConfig `protobuf:"bytes,1,opt,name=index_config,json=indexConfig,proto3" json:"index_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} } func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} 
func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -944,7 +999,7 @@ func (m *GetIndexConfigResponse) XXX_DiscardUnknown() { var xxx_messageInfo_GetIndexConfigResponse proto.InternalMessageInfo -func (m *GetIndexConfigResponse) GetIndexConfig() *any.Any { +func (m *GetIndexConfigResponse) GetIndexConfig() *IndexConfig { if m != nil { return m.IndexConfig } @@ -962,7 +1017,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -1003,7 +1058,7 @@ func (m *Document) Reset() { *m = Document{} } func (m *Document) String() string { return proto.CompactTextString(m) } func (*Document) ProtoMessage() {} func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{20} + return fileDescriptor_7b2daf652facb3ae, []int{21} } func (m *Document) XXX_Unmarshal(b []byte) error { @@ -1062,6 +1117,7 @@ func init() { proto.RegisterType((*DeleteDocumentResponse)(nil), "index.DeleteDocumentResponse") proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") + proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") proto.RegisterType((*Document)(nil), "index.Document") @@ -1070,74 +1126,78 @@ func init() { func init() { 
proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1067 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x5d, 0x53, 0xdb, 0x46, - 0x14, 0xb5, 0x6c, 0x0b, 0x9c, 0x6b, 0x6c, 0xdc, 0x0d, 0x90, 0x44, 0x09, 0x6d, 0xd8, 0xa6, 0x8d, - 0x67, 0xda, 0xda, 0x1d, 0x32, 0x4c, 0x9a, 0xb4, 0x9d, 0x8e, 0x63, 0x29, 0xe0, 0xe0, 0x0a, 0x22, - 0x43, 0x98, 0xf4, 0x85, 0x91, 0xad, 0x05, 0x6b, 0x30, 0x92, 0x6b, 0xad, 0x99, 0xf2, 0xd8, 0xd7, - 0xfe, 0x92, 0xf6, 0xe7, 0xf4, 0xbd, 0x3f, 0xa6, 0xb3, 0x1f, 0x12, 0x92, 0xb0, 0x44, 0x67, 0xfa, - 0xc2, 0xb0, 0x77, 0xcf, 0x3d, 0x7b, 0xee, 0xdd, 0xbb, 0x47, 0x06, 0x6d, 0x3a, 0xf3, 0xa9, 0x3f, - 0x9c, 0x9f, 0xb5, 0x5d, 0xcf, 0x21, 0xbf, 0x89, 0xbf, 0x2d, 0x1e, 0x44, 0x2a, 0x5f, 0x68, 0x8f, - 0xce, 0x7d, 0xff, 0x7c, 0x42, 0xda, 0x11, 0xd2, 0xf6, 0xae, 0x05, 0x42, 0x7b, 0x9c, 0xde, 0x22, - 0x97, 0x53, 0x2a, 0x37, 0xf1, 0x1f, 0x0a, 0x6c, 0x98, 0xbe, 0x43, 0xf6, 0x88, 0x3d, 0xa1, 0xe3, - 0xee, 0x98, 0x8c, 0x2e, 0x2c, 0xf2, 0xeb, 0x9c, 0x04, 0x14, 0xbd, 0x02, 0x75, 0x3a, 0xf3, 0x87, - 0xe4, 0xa1, 0xf2, 0x54, 0x69, 0xd6, 0xb7, 0x3f, 0x6f, 0x89, 0x63, 0x17, 0xa3, 0x5b, 0x87, 0x0c, - 0x6a, 0x89, 0x0c, 0xbc, 0x03, 0x2a, 0x5f, 0xa3, 0x55, 0xa8, 0xee, 0x19, 0x9d, 0xfe, 0xd1, 0x5e, - 0xcf, 0x34, 0x06, 0x83, 0x46, 0x01, 0xad, 0x40, 0xa5, 0xdf, 0xfb, 0x60, 0xf0, 0x95, 0x82, 0x6a, - 0x70, 0xcf, 0x32, 0x3a, 0xba, 0xd8, 0x2c, 0xe2, 0xbf, 0x14, 0x78, 0x70, 0x8b, 0x3e, 0x98, 0xfa, - 0x5e, 0x40, 0xd0, 0x6b, 0x50, 0x03, 0x6a, 0xd3, 0x50, 0xcd, 0xb3, 0x2c, 0x35, 0x02, 0xde, 0x1a, - 0x30, 0xac, 0x25, 0x52, 0xb0, 0x05, 0x2a, 0x5f, 0xa3, 0x2a, 0x2c, 0x0b, 0x39, 0x1f, 0x1b, 0x05, - 0x76, 0xf8, 0xb1, 0x19, 0x2e, 0x15, 0x74, 0x0f, 0xd4, 0x0e, 0x93, 0xd6, 0x28, 0xa2, 0x0a, 0x94, - 0x75, 0xa3, 0xa3, 0x37, 0x4a, 0x2c, 0xc8, 0x04, 0x7e, 0x6c, 0x94, 0x19, 0xdc, 0x3c, 0x38, 0x3a, - 0x15, 0x4b, 0x15, 0x1f, 0x42, 0xe5, 0x67, 0x42, 0x6d, 
0xc7, 0xa6, 0x36, 0xda, 0x82, 0x95, 0xf3, - 0xd9, 0x74, 0x74, 0x6a, 0x3b, 0xce, 0x8c, 0x04, 0x01, 0x97, 0x78, 0xcf, 0xaa, 0xb2, 0x58, 0x47, - 0x84, 0x18, 0x64, 0x4c, 0xe9, 0x34, 0x82, 0x14, 0x05, 0x84, 0xc5, 0x24, 0x04, 0xff, 0xa3, 0x40, - 0x99, 0x95, 0x83, 0xea, 0x50, 0x74, 0x1d, 0x49, 0x52, 0x74, 0x1d, 0x96, 0x3b, 0x74, 0x3d, 0x27, - 0x9d, 0xcb, 0x62, 0x21, 0xfd, 0xf3, 0xb0, 0x3b, 0x25, 0xde, 0x9d, 0x4f, 0x62, 0xdd, 0x49, 0xb4, - 0x02, 0x7d, 0x05, 0x95, 0x4b, 0x29, 0xfb, 0x61, 0xf9, 0xa9, 0xd2, 0xac, 0x6e, 0xaf, 0x4a, 0x6c, - 0x58, 0x8d, 0x15, 0x01, 0xf0, 0x7e, 0xac, 0x6f, 0xc7, 0xe6, 0xbe, 0x79, 0x70, 0x62, 0x8a, 0x2b, - 0x7c, 0x7b, 0xd0, 0xef, 0x1f, 0x9c, 0x18, 0x96, 0xb8, 0xc2, 0x6e, 0xc7, 0xd4, 0x7b, 0x7a, 0xe7, - 0x88, 0xb5, 0x0e, 0x60, 0xa9, 0x6f, 0x74, 0x74, 0xc3, 0x6a, 0x94, 0x18, 0x70, 0xb0, 0x77, 0x7c, - 0xa4, 0xb3, 0xb4, 0x32, 0xfe, 0x5d, 0x81, 0xe5, 0xee, 0x64, 0x1e, 0x50, 0x32, 0x43, 0x6d, 0x50, - 0x3d, 0xdf, 0x21, 0xac, 0x53, 0xa5, 0x66, 0x75, 0xfb, 0x91, 0x94, 0x20, 0xb7, 0xb9, 0xec, 0xc0, - 0xf0, 0xe8, 0xec, 0xda, 0x12, 0x38, 0xcd, 0x00, 0xb8, 0x09, 0xa2, 0x06, 0x94, 0x2e, 0xc8, 0xb5, - 0xec, 0x10, 0xfb, 0x17, 0x6d, 0x81, 0x7a, 0x65, 0x4f, 0xe6, 0x84, 0xf7, 0xa6, 0xba, 0x5d, 0x8d, - 0xd5, 0x6f, 0x89, 0x9d, 0xd7, 0xc5, 0xef, 0x14, 0xfc, 0x02, 0x1a, 0x2c, 0xd4, 0xf3, 0xce, 0xfc, - 0x68, 0xb0, 0x3e, 0x83, 0x32, 0x3b, 0x83, 0xb3, 0xa5, 0x32, 0xf9, 0x06, 0xde, 0x01, 0x24, 0x85, - 0xbd, 0xf3, 0x5d, 0x2f, 0x7c, 0x1d, 0x77, 0xa6, 0x7d, 0x01, 0xf7, 0x65, 0x5a, 0x9f, 0xd8, 0x57, - 0x24, 0xcc, 0x4b, 0x5d, 0x2e, 0xfe, 0x29, 0x82, 0x25, 0x54, 0x35, 0x61, 0x79, 0x24, 0xc2, 0xf2, - 0x84, 0x7a, 0xb2, 0x47, 0x56, 0xb8, 0x8d, 0xff, 0x56, 0x60, 0x4d, 0x06, 0x4f, 0x6c, 0x3a, 0x1a, - 0x47, 0x14, 0x2f, 0x41, 0x25, 0x57, 0xc4, 0xa3, 0xf2, 0xc5, 0x6c, 0x25, 0x09, 0x12, 0xd8, 0x96, - 0xc1, 0x80, 0x96, 0xc0, 0x47, 0xa5, 0x15, 0x33, 0x4a, 0x8b, 0x8b, 0x2b, 0xe5, 0x8b, 0xdb, 0x01, - 0x95, 0x53, 0x27, 0x27, 0xa8, 0x02, 0xe5, 0x77, 0x07, 0x3d, 0x53, 0x3c, 0xba, 0xbe, 0xd1, 
0xf9, - 0x20, 0x27, 0xe7, 0xf8, 0x90, 0x4f, 0x51, 0x09, 0x3f, 0x03, 0xb4, 0x4b, 0xa8, 0xee, 0x8f, 0xe6, - 0x97, 0x4c, 0x57, 0x46, 0xeb, 0xba, 0x70, 0x3f, 0x81, 0x92, 0x75, 0x7f, 0x0d, 0x4b, 0x67, 0x2e, - 0x99, 0x38, 0x81, 0xec, 0xdc, 0x5a, 0x4b, 0x18, 0x60, 0x2b, 0x34, 0xc0, 0x56, 0xc7, 0xbb, 0xb6, - 0x24, 0x06, 0x1f, 0xc1, 0x5a, 0x8f, 0x69, 0xbf, 0xe3, 0xb0, 0x18, 0x6b, 0xf1, 0x3f, 0xb0, 0x7e, - 0x03, 0xeb, 0x29, 0x56, 0x29, 0x6e, 0x0d, 0xd4, 0x91, 0x3f, 0x97, 0x97, 0xa2, 0x5a, 0x62, 0x81, - 0x9f, 0xc3, 0xba, 0x4e, 0x26, 0x84, 0x92, 0xbb, 0x4a, 0x6e, 0xc1, 0x46, 0x1a, 0x98, 0x4b, 0xdc, - 0x87, 0xda, 0x80, 0xd8, 0x33, 0x76, 0xd3, 0x82, 0xf0, 0x7b, 0xa8, 0x07, 0x3c, 0x70, 0x3a, 0x13, - 0x91, 0xdc, 0x26, 0xd5, 0x82, 0x78, 0x32, 0xde, 0x87, 0x7a, 0xc8, 0x26, 0x4f, 0x7d, 0x05, 0xb5, - 0x88, 0x2e, 0x98, 0x4f, 0xf2, 0xd9, 0x56, 0x42, 0x36, 0x86, 0xc4, 0xef, 0x61, 0x63, 0x97, 0x50, - 0xde, 0xa5, 0xae, 0xef, 0x9d, 0xb9, 0xe7, 0xb1, 0xc1, 0x5d, 0xe1, 0xe3, 0x74, 0x3a, 0xe2, 0xf1, - 0x5c, 0xce, 0xaa, 0x7b, 0x43, 0x80, 0x4d, 0x58, 0x0f, 0x29, 0x99, 0x6f, 0x05, 0x11, 0xe3, 0x0e, - 0x08, 0xdc, 0x29, 0x33, 0xc1, 0xfc, 0xb9, 0x00, 0x37, 0x4a, 0xc7, 0x7b, 0x50, 0x09, 0xfb, 0xfc, - 0xff, 0xe6, 0x61, 0xfb, 0xcf, 0x65, 0x50, 0xb9, 0x2e, 0x64, 0xc1, 0x6a, 0xea, 0x9b, 0x85, 0x36, - 0x73, 0xbf, 0xac, 0xda, 0xa7, 0xf9, 0x9f, 0x3a, 0x5c, 0x40, 0x3f, 0x42, 0x25, 0xb4, 0x35, 0xb4, - 0x71, 0x4b, 0x87, 0xc1, 0x3e, 0xf7, 0xda, 0x83, 0x18, 0x4b, 0xdc, 0x69, 0x70, 0x01, 0xbd, 0x81, - 0x6a, 0xcc, 0xe0, 0x50, 0xca, 0x8d, 0x63, 0xa6, 0xa7, 0x65, 0x90, 0xe3, 0x02, 0xd2, 0x61, 0x25, - 0xee, 0x76, 0x48, 0x4b, 0x92, 0xc4, 0x2d, 0x30, 0x87, 0xa5, 0x1b, 0x29, 0xc9, 0xad, 0x25, 0x45, - 0x9e, 0x2a, 0x67, 0x37, 0x92, 0xc2, 0x3d, 0x2e, 0x93, 0xe5, 0x71, 0x8e, 0x21, 0xe2, 0xc2, 0xb7, - 0x0a, 0x7a, 0x0b, 0xd5, 0x98, 0xbf, 0x44, 0x7d, 0xb9, 0xed, 0x4c, 0x91, 0xa0, 0x05, 0x76, 0x84, - 0x0b, 0xc8, 0x84, 0x5a, 0xc2, 0x0c, 0x50, 0x78, 0xf2, 0x22, 0xe3, 0xd1, 0x9e, 0x2c, 0xde, 0x0c, - 0xd9, 0x9a, 0x0a, 0x7a, 0x0f, 
0xf5, 0xa4, 0x09, 0xa0, 0x30, 0x67, 0xa1, 0x89, 0x68, 0x9b, 0x19, - 0xbb, 0x31, 0xca, 0x97, 0xb0, 0x24, 0x5e, 0x36, 0x5a, 0x93, 0xe0, 0x84, 0x6d, 0x68, 0xeb, 0xa9, - 0x68, 0x54, 0x5b, 0x0f, 0xea, 0xc9, 0x57, 0x9c, 0xd9, 0xee, 0xcd, 0x9b, 0x1e, 0x2d, 0x78, 0xf4, - 0xfc, 0xde, 0x6a, 0x89, 0xd7, 0x9b, 0xc9, 0xf4, 0x24, 0xc5, 0x94, 0x78, 0xeb, 0xb8, 0x80, 0x7e, - 0x80, 0xca, 0xc0, 0xb3, 0xa7, 0xc1, 0xd8, 0xa7, 0x99, 0x1c, 0x99, 0x33, 0xf8, 0xa6, 0xf9, 0xcb, - 0x97, 0xe7, 0x2e, 0x1d, 0xcf, 0x87, 0xad, 0x91, 0x7f, 0xd9, 0xbe, 0xf4, 0x83, 0xf9, 0x85, 0xdd, - 0x1e, 0x4e, 0xec, 0x80, 0xb6, 0x93, 0x3f, 0xc3, 0x87, 0x4b, 0x7c, 0xfd, 0xe2, 0xdf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x05, 0xe3, 0xab, 0x2e, 0x9f, 0x0b, 0x00, 0x00, + // 1129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x5f, 0x73, 0xda, 0xc6, + 0x17, 0x45, 0x80, 0x6c, 0x7c, 0x65, 0x08, 0xbf, 0x8d, 0xed, 0x24, 0x24, 0xfe, 0x35, 0xde, 0xa6, + 0x0d, 0x33, 0x6d, 0xa1, 0xe3, 0x8c, 0x27, 0x4d, 0xda, 0x4e, 0x87, 0x80, 0x62, 0x13, 0x13, 0xe1, + 0x0a, 0x1c, 0x4f, 0xfa, 0xe2, 0x11, 0xb0, 0x06, 0x8d, 0x41, 0x52, 0xd1, 0xe2, 0x29, 0x8f, 0x7d, + 0xed, 0x7b, 0xbf, 0x43, 0xfb, 0x71, 0xfa, 0xde, 0x0f, 0xd3, 0xd9, 0x3f, 0x92, 0x25, 0xd9, 0xc8, + 0x9d, 0xe9, 0x8b, 0xc7, 0x7b, 0xef, 0xb9, 0x67, 0xcf, 0xde, 0xbd, 0x7b, 0x64, 0x43, 0xc5, 0x9b, + 0xbb, 0xd4, 0x1d, 0x2c, 0x2e, 0xea, 0xb6, 0x33, 0x22, 0xbf, 0x88, 0x9f, 0x35, 0x1e, 0x44, 0x2a, + 0x5f, 0x54, 0x1e, 0x8d, 0x5d, 0x77, 0x3c, 0x25, 0xf5, 0x10, 0x69, 0x39, 0x4b, 0x81, 0xa8, 0x3c, + 0x4e, 0xa6, 0xc8, 0xcc, 0xa3, 0x32, 0x89, 0x7f, 0x53, 0x60, 0xc7, 0x70, 0x47, 0xe4, 0x88, 0x58, + 0x53, 0x3a, 0x69, 0x4e, 0xc8, 0xf0, 0xd2, 0x24, 0x3f, 0x2f, 0x88, 0x4f, 0xd1, 0x2b, 0x50, 0xbd, + 0xb9, 0x3b, 0x20, 0x0f, 0x95, 0xa7, 0x4a, 0xb5, 0xb4, 0xff, 0x69, 0x4d, 0x6c, 0x7b, 0x3b, 0xba, + 0x76, 0xc2, 0xa0, 0xa6, 0xa8, 0xc0, 0x07, 0xa0, 0xf2, 0x35, 0xba, 0x07, 0xda, 0x91, 0xde, 0xe8, + 0xf4, 0x8f, 0xda, 0x86, 0xde, 0xeb, 0x95, 0x33, 
0x68, 0x13, 0x0a, 0x9d, 0xf6, 0x07, 0x9d, 0xaf, + 0x14, 0x54, 0x84, 0x0d, 0x53, 0x6f, 0xb4, 0x44, 0x32, 0x8b, 0xff, 0x54, 0xe0, 0xc1, 0x0d, 0x7a, + 0xdf, 0x73, 0x1d, 0x9f, 0xa0, 0xd7, 0xa0, 0xfa, 0xd4, 0xa2, 0x81, 0x9a, 0x67, 0xab, 0xd4, 0x08, + 0x78, 0xad, 0xc7, 0xb0, 0xa6, 0x28, 0xc1, 0x26, 0xa8, 0x7c, 0x8d, 0x34, 0x58, 0x17, 0x72, 0x3e, + 0x96, 0x33, 0x6c, 0xf3, 0x53, 0x23, 0x58, 0x2a, 0x68, 0x03, 0xd4, 0x06, 0x93, 0x56, 0xce, 0xa2, + 0x02, 0xe4, 0x5b, 0x7a, 0xa3, 0x55, 0xce, 0xb1, 0x20, 0x13, 0xf8, 0xb1, 0x9c, 0x67, 0x70, 0xa3, + 0xdb, 0x3f, 0x17, 0x4b, 0x15, 0x9f, 0x40, 0xe1, 0x3d, 0xa1, 0xd6, 0xc8, 0xa2, 0x16, 0xda, 0x83, + 0xcd, 0xf1, 0xdc, 0x1b, 0x9e, 0x5b, 0xa3, 0xd1, 0x9c, 0xf8, 0x3e, 0x97, 0xb8, 0x61, 0x6a, 0x2c, + 0xd6, 0x10, 0x21, 0x06, 0x99, 0x50, 0xea, 0x85, 0x90, 0xac, 0x80, 0xb0, 0x98, 0x84, 0xe0, 0xbf, + 0x15, 0xc8, 0xb3, 0xe3, 0xa0, 0x12, 0x64, 0xed, 0x91, 0x24, 0xc9, 0xda, 0x23, 0x56, 0x3b, 0xb0, + 0x9d, 0x51, 0xb2, 0x96, 0xc5, 0x02, 0xfa, 0xe7, 0x41, 0x77, 0x72, 0xbc, 0x3b, 0xff, 0x8b, 0x74, + 0x27, 0xd6, 0x0a, 0xf4, 0x05, 0x14, 0x66, 0x52, 0xf6, 0xc3, 0xfc, 0x53, 0xa5, 0xaa, 0xed, 0xdf, + 0x93, 0xd8, 0xe0, 0x34, 0x66, 0x08, 0xc0, 0xc7, 0x91, 0xbe, 0x9d, 0x1a, 0xc7, 0x46, 0xf7, 0xcc, + 0x10, 0x57, 0xf8, 0xb6, 0xdb, 0xe9, 0x74, 0xcf, 0x74, 0x53, 0x5c, 0x61, 0xb3, 0x61, 0xb4, 0xda, + 0xad, 0x46, 0x9f, 0xb5, 0x0e, 0x60, 0xad, 0xa3, 0x37, 0x5a, 0xba, 0x59, 0xce, 0x31, 0x60, 0xef, + 0xe8, 0xb4, 0xdf, 0x62, 0x65, 0x79, 0xfc, 0xab, 0x02, 0xeb, 0xcd, 0xe9, 0xc2, 0xa7, 0x64, 0x8e, + 0xea, 0xa0, 0x3a, 0xee, 0x88, 0xb0, 0x4e, 0xe5, 0xaa, 0xda, 0xfe, 0x23, 0x29, 0x41, 0xa6, 0xb9, + 0x6c, 0x5f, 0x77, 0xe8, 0x7c, 0x69, 0x0a, 0x5c, 0x45, 0x07, 0xb8, 0x0e, 0xa2, 0x32, 0xe4, 0x2e, + 0xc9, 0x52, 0x76, 0x88, 0xfd, 0x8a, 0xf6, 0x40, 0xbd, 0xb2, 0xa6, 0x0b, 0xc2, 0x7b, 0xa3, 0xed, + 0x6b, 0x91, 0xf3, 0x9b, 0x22, 0xf3, 0x3a, 0xfb, 0x8d, 0x82, 0x5f, 0x40, 0x99, 0x85, 0xda, 0xce, + 0x85, 0x1b, 0x0e, 0xd6, 0x27, 0x90, 0x67, 0x7b, 0x70, 0xb6, 0x44, 0x25, 0x4f, 0xe0, 
0x03, 0x40, + 0x52, 0xd8, 0x3b, 0xd7, 0x76, 0x82, 0xd7, 0x71, 0x67, 0xd9, 0x67, 0x70, 0x5f, 0x96, 0x75, 0x88, + 0x75, 0x45, 0x82, 0xba, 0xc4, 0xe5, 0xe2, 0x1f, 0x42, 0x58, 0x4c, 0x55, 0x15, 0xd6, 0x87, 0x22, + 0x2c, 0x77, 0x28, 0xc5, 0x7b, 0x64, 0x06, 0x69, 0xfc, 0x97, 0x02, 0x5b, 0x32, 0x78, 0x66, 0xd1, + 0xe1, 0x24, 0xa4, 0x78, 0x09, 0x2a, 0xb9, 0x22, 0x0e, 0x95, 0x2f, 0x66, 0x2f, 0x4e, 0x10, 0xc3, + 0xd6, 0x74, 0x06, 0x34, 0x05, 0x3e, 0x3c, 0x5a, 0x76, 0xc5, 0xd1, 0xa2, 0xe2, 0x72, 0xe9, 0xe2, + 0x0e, 0x40, 0xe5, 0xd4, 0xf1, 0x09, 0x2a, 0x40, 0xfe, 0x5d, 0xb7, 0x6d, 0x88, 0x47, 0xd7, 0xd1, + 0x1b, 0x1f, 0xe4, 0xe4, 0x9c, 0x9e, 0xf0, 0x29, 0xca, 0xe1, 0x67, 0x80, 0x0e, 0x09, 0x6d, 0xb9, + 0xc3, 0xc5, 0x8c, 0xe9, 0x5a, 0xd1, 0xba, 0x26, 0xdc, 0x8f, 0xa1, 0xe4, 0xb9, 0xbf, 0x84, 0xb5, + 0x0b, 0x9b, 0x4c, 0x47, 0xbe, 0xec, 0xdc, 0x56, 0x4d, 0x18, 0x60, 0x2d, 0x30, 0xc0, 0x5a, 0xc3, + 0x59, 0x9a, 0x12, 0x83, 0xfb, 0xb0, 0xd5, 0x66, 0xda, 0xef, 0xd8, 0x2c, 0xc2, 0x9a, 0xfd, 0x17, + 0xac, 0x5f, 0xc1, 0x76, 0x82, 0x55, 0x8a, 0xdb, 0x02, 0x75, 0xe8, 0x2e, 0xe4, 0xa5, 0xa8, 0xa6, + 0x58, 0xe0, 0xe7, 0xb0, 0xdd, 0x22, 0x53, 0x42, 0xc9, 0x5d, 0x47, 0xae, 0xc1, 0x4e, 0x12, 0x98, + 0x4a, 0xdc, 0x81, 0x62, 0x8f, 0x58, 0x73, 0x76, 0xd3, 0x82, 0xf0, 0x5b, 0x28, 0xf9, 0x3c, 0x70, + 0x3e, 0x17, 0x91, 0xd4, 0x26, 0x15, 0xfd, 0x68, 0x31, 0x3e, 0x86, 0x52, 0xc0, 0x26, 0x77, 0x7d, + 0x05, 0xc5, 0x90, 0xce, 0x5f, 0x4c, 0xd3, 0xd9, 0x36, 0x03, 0x36, 0x86, 0xc4, 0xbf, 0x2b, 0xa0, + 0xf1, 0x1e, 0x35, 0x5d, 0xe7, 0xc2, 0x1e, 0x33, 0x2a, 0x3e, 0x44, 0xe7, 0x33, 0xcb, 0xf3, 0x6c, + 0x67, 0x9c, 0x4e, 0xc5, 0xa1, 0xef, 0x05, 0x12, 0xed, 0x02, 0x88, 0x52, 0xba, 0xf4, 0x88, 0xb4, + 0xc7, 0x0d, 0x1e, 0xe9, 0x2f, 0x3d, 0x36, 0x10, 0x48, 0xa4, 0x7d, 0xea, 0xce, 0xad, 0x31, 0x11, + 0xb0, 0x1c, 0x87, 0x95, 0x79, 0xa6, 0x27, 0x12, 0x0c, 0x8d, 0xbb, 0xb0, 0x73, 0x48, 0x68, 0x44, + 0x59, 0x78, 0xd8, 0x03, 0x10, 0xdb, 0x9e, 0x0f, 0x79, 0x5c, 0x0a, 0x44, 0x72, 0xf6, 0xa3, 0x15, + 0x9a, 0x7d, 0xbd, 0xc0, 
0x06, 0x6c, 0x07, 0x84, 0xcc, 0x4d, 0xfd, 0x08, 0x9f, 0x16, 0xe8, 0xb2, + 0x68, 0xfa, 0xb4, 0x82, 0x1d, 0x96, 0xe3, 0x23, 0x28, 0x04, 0xb7, 0xff, 0xdf, 0xa6, 0x74, 0xff, + 0x8f, 0x75, 0x50, 0xb9, 0x2e, 0x64, 0xc2, 0xbd, 0xc4, 0x97, 0x14, 0xed, 0xa6, 0x7e, 0xef, 0x2b, + 0xff, 0x4f, 0xff, 0x00, 0xe3, 0x0c, 0xfa, 0x1e, 0x0a, 0x81, 0xd9, 0xa2, 0x9d, 0x1b, 0x3a, 0x74, + 0xf6, 0x47, 0x48, 0xe5, 0x41, 0x84, 0x25, 0xea, 0x7f, 0x38, 0x83, 0xde, 0x80, 0x16, 0xb1, 0x5d, + 0x94, 0xf8, 0x46, 0x44, 0xac, 0xb8, 0xb2, 0x82, 0x1c, 0x67, 0x50, 0x0b, 0x36, 0xa3, 0x1e, 0x8c, + 0x2a, 0x71, 0x92, 0xa8, 0x31, 0xa7, 0xb0, 0x34, 0x43, 0x25, 0xa9, 0x67, 0x49, 0x90, 0x27, 0x8e, + 0x73, 0x18, 0x4a, 0xe1, 0xce, 0xbb, 0x92, 0xe5, 0x71, 0x8a, 0x4d, 0xe3, 0xcc, 0xd7, 0x0a, 0x7a, + 0x0b, 0x5a, 0xc4, 0xf5, 0xc2, 0xbe, 0xdc, 0xf4, 0xcb, 0x50, 0xd0, 0x2d, 0x26, 0x89, 0x33, 0xc8, + 0x80, 0x62, 0xcc, 0xa2, 0xd0, 0xe3, 0xe8, 0x20, 0x27, 0xb9, 0x9e, 0xdc, 0x9e, 0x0c, 0xd8, 0xaa, + 0x0a, 0xfa, 0x11, 0x4a, 0x71, 0x6b, 0x42, 0x41, 0xcd, 0xad, 0xd6, 0x56, 0xd9, 0x5d, 0x91, 0x8d, + 0x50, 0xbe, 0x84, 0x35, 0xe1, 0x37, 0x68, 0x4b, 0x82, 0x63, 0x66, 0x56, 0xd9, 0x4e, 0x44, 0xc3, + 0xb3, 0xb5, 0xa1, 0x14, 0x7f, 0xc3, 0x2b, 0xdb, 0xbd, 0x7b, 0xdd, 0xa3, 0x5b, 0x9e, 0x3c, 0xbf, + 0xb7, 0x62, 0xec, 0xf5, 0xae, 0x64, 0x7a, 0x92, 0x60, 0x8a, 0xbd, 0x75, 0x9c, 0x41, 0xdf, 0x41, + 0xa1, 0xe7, 0x58, 0x9e, 0x3f, 0x71, 0xe9, 0x4a, 0x8e, 0x95, 0x33, 0xf8, 0xa6, 0xfa, 0xd3, 0xe7, + 0x63, 0x9b, 0x4e, 0x16, 0x83, 0xda, 0xd0, 0x9d, 0xd5, 0x67, 0xae, 0xbf, 0xb8, 0xb4, 0xea, 0x83, + 0xa9, 0xe5, 0xd3, 0x7a, 0xfc, 0x9f, 0x83, 0xc1, 0x1a, 0x5f, 0xbf, 0xf8, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xbf, 0x2f, 0x50, 0x92, 0x35, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index bf354b0..3edab24 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -144,8 +144,14 @@ message SearchResponse { google.protobuf.Any search_result = 1; } +message IndexConfig { + google.protobuf.Any index_mapping = 1; + string index_type = 2; + string index_storage_type = 3; +} + message GetIndexConfigResponse { - google.protobuf.Any index_config = 1; + IndexConfig index_config = 1; } message GetIndexStatsResponse { From e28f6d1d9cea0a14526678a69bfdaaaa08c5a690 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 09:30:16 +0900 Subject: [PATCH 25/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 3cc4182..5141ff9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change node info structure #94 - Change protobuf for indexer and dispatcher #95 - Change server arguments #96 +- Change index protobuf #97 ## [v0.7.1] - 2019-07-18 From 4344f4ceca14156e7888e51d777fc528f135719d Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 19:49:41 +0900 Subject: [PATCH 26/76] Use protobuf document (#98) --- cmd/blast/dispatcher_get.go | 8 +- cmd/blast/dispatcher_index.go | 30 +-- cmd/blast/indexer_get.go | 8 +- cmd/blast/indexer_index.go | 31 +-- dispatcher/grpc_client.go | 23 +-- dispatcher/grpc_service.go | 58 ++---- dispatcher/http_handler.go | 48 +++-- go.mod | 1 + indexer/grpc_client.go | 24 +-- indexer/grpc_service.go | 42 ++-- indexer/http_handler.go | 46 +++-- indexer/index.go | 23 +-- indexer/raft_fsm.go | 25 +-- indexer/raft_server.go | 3 +- indexer/server_test.go | 68 +++--- indexutils/document.go | 65 ------ protobuf/distribute/distribute.pb.go | 105 +++++----- protobuf/distribute/distribute.proto | 6 +- protobuf/index/index.go | 62 ++++++ protobuf/index/index.pb.go | 296 +++++++++++++-------------- 
protobuf/index/index.proto | 15 +- testutils/testutils.go | 29 --- 22 files changed, 470 insertions(+), 546 deletions(-) delete mode 100644 indexutils/document.go create mode 100644 protobuf/index/index.go diff --git a/cmd/blast/dispatcher_get.go b/cmd/blast/dispatcher_get.go index d9108fb..f46c7a7 100644 --- a/cmd/blast/dispatcher_get.go +++ b/cmd/blast/dispatcher_get.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "errors" "fmt" "os" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -43,17 +43,17 @@ func dispatcherGet(c *cli.Context) error { } }() - fields, err := client.GetDocument(id) + doc, err := client.GetDocument(id) if err != nil { return err } - fieldsBytes, err := json.MarshalIndent(fields, "", " ") + docBytes, err := index.MarshalDocument(doc) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(fieldsBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) return nil } diff --git a/cmd/blast/dispatcher_index.go b/cmd/blast/dispatcher_index.go index 57f282d..86c2dd7 100644 --- a/cmd/blast/dispatcher_index.go +++ b/cmd/blast/dispatcher_index.go @@ -23,7 +23,7 @@ import ( "os" "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -34,22 +34,24 @@ func dispatcherIndex(c *cli.Context) error { id := c.Args().Get(0) fieldsSrc := c.Args().Get(1) - docs := make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) if id != "" && fieldsSrc != "" { - // create fields - var fields map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fields) + var fieldsMap map[string]interface{} + err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) if err != nil { return err } - - // create document - doc, err := indexutils.NewDocument(id, fields) + docMap := map[string]interface{}{ + "id": id, + "fields": fieldsMap, + } + docBytes, err 
:= json.Marshal(docMap) if err != nil { return err } - + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) docs = append(docs, doc) } @@ -80,7 +82,8 @@ func dispatcherIndex(c *cli.Context) error { if err != nil { if err == io.EOF || err == io.ErrClosedPipe { if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } @@ -91,7 +94,8 @@ func dispatcherIndex(c *cli.Context) error { } if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } @@ -103,8 +107,8 @@ func dispatcherIndex(c *cli.Context) error { if err != nil { return err } - - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go index 5a59a3e..53abb27 100644 --- a/cmd/blast/indexer_get.go +++ b/cmd/blast/indexer_get.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "errors" "fmt" "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -43,17 +43,17 @@ func indexerGet(c *cli.Context) error { } }() - fields, err := client.GetDocument(id) + doc, err := client.GetDocument(id) if err != nil { return err } - fieldsBytes, err := json.MarshalIndent(fields, "", " ") + docBytes, err := index.MarshalDocument(doc) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(fieldsBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) return nil } diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go index 9b53711..c70e6ea 100644 --- a/cmd/blast/indexer_index.go +++ b/cmd/blast/indexer_index.go @@ -22,8 +22,9 @@ import ( "io/ioutil" "os" + 
"github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" "github.com/urfave/cli" ) @@ -34,22 +35,24 @@ func indexerIndex(c *cli.Context) error { id := c.Args().Get(0) fieldsSrc := c.Args().Get(1) - docs := make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) if id != "" && fieldsSrc != "" { - // create fields - var fields map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fields) + var fieldsMap map[string]interface{} + err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) if err != nil { return err } - - // create document - doc, err := indexutils.NewDocument(id, fields) + docMap := map[string]interface{}{ + "id": id, + "fields": fieldsMap, + } + docBytes, err := json.Marshal(docMap) if err != nil { return err } - + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) docs = append(docs, doc) } @@ -80,7 +83,8 @@ func indexerIndex(c *cli.Context) error { if err != nil { if err == io.EOF || err == io.ErrClosedPipe { if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } @@ -91,7 +95,8 @@ func indexerIndex(c *cli.Context) error { } if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } @@ -103,8 +108,8 @@ func indexerIndex(c *cli.Context) error { if err != nil { return err } - - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { return err } diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go index b1e4820..b7cf07d 100644 --- a/dispatcher/grpc_client.go +++ b/dispatcher/grpc_client.go @@ -22,9 +22,9 @@ import ( "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" blasterrors 
"github.com/mosuka/blast/errors" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -120,7 +120,7 @@ func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (str return resp.State.String(), nil } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { req := &distribute.GetDocumentRequest{ Id: id, } @@ -137,10 +137,7 @@ func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string } } - ins, err := protobuf.MarshalAny(resp.Fields) - fields := *ins.(*map[string]interface{}) - - return fields, nil + return resp.Document, nil } func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { @@ -177,7 +174,7 @@ func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.Cal return searchResult, nil } -func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { +func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { stream, err := c.client.IndexDocument(c.ctx, opts...) 
if err != nil { st, _ := status.FromError(err) @@ -186,18 +183,8 @@ func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.Cal } for _, doc := range docs { - id := doc.Id - fields := doc.Fields - - fieldsAny := &any.Any{} - err := protobuf.UnmarshalAny(&fields, fieldsAny) - if err != nil { - return -1, err - } - req := &distribute.IndexDocumentRequest{ - Id: id, - Fields: fieldsAny, + Document: doc, } err = stream.Send(req) diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 9ebf9e0..309b6d6 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -25,16 +25,14 @@ import ( "sync" "time" - "github.com/mosuka/blast/protobuf/index" - "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/sortutils" "go.uber.org/zap" @@ -201,10 +199,10 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Info("wait for receive a manager cluster updates from stream") resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } + //if err == io.EOF { + // s.logger.Info(err.Error()) + // continue + //} if err != nil { s.logger.Error(err.Error()) continue @@ -539,7 +537,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume type respVal struct { clusterId string - fields map[string]interface{} + doc *index.Document err error } @@ -551,11 +549,11 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume wg.Add(1) go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { // index documents - fields, err := client.GetDocument(id) + 
doc, err := client.GetDocument(id) wg.Done() respChan <- respVal{ clusterId: clusterId, - fields: fields, + doc: doc, err: err, } }(clusterId, client, req.Id, respChan) @@ -566,10 +564,10 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume close(respChan) // summarize responses - var fields map[string]interface{} + var doc *index.Document for r := range respChan { - if r.fields != nil { - fields = r.fields + if r.doc != nil { + doc = r.doc } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) @@ -578,15 +576,8 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume resp := &distribute.GetDocumentResponse{} - fieldsAny := &any.Any{} - err := protobuf.UnmarshalAny(fields, fieldsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - // response - resp.Fields = fieldsAny + resp.Document = doc return resp, nil } @@ -736,9 +727,9 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe } // initialize document list for each cluster - docSet := make(map[string][]*indexutils.Document, 0) + docSet := make(map[string][]*index.Document, 0) for _, clusterId := range clusterIds { - docSet[clusterId] = make([]*indexutils.Document, 0) + docSet[clusterId] = make([]*index.Document, 0) } for { @@ -752,26 +743,11 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe return status.Error(codes.Internal, err.Error()) } - // fields - ins, err := protobuf.MarshalAny(req.Fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - fields := *ins.(*map[string]interface{}) - - // document - doc, err := indexutils.NewDocument(req.Id, fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - // distribute documents to each cluster based on document id - docIdHash := s.docIdHash(req.Id) + docIdHash := 
s.docIdHash(req.Document.Id) clusterNum := uint64(len(indexerClients)) clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], doc) + docSet[clusterId] = append(docSet[clusterId], req.Document) } type respVal struct { @@ -786,7 +762,7 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe wg := &sync.WaitGroup{} for clusterId, docs := range docSet { wg.Add(1) - go func(clusterId string, docs []*indexutils.Document, respChan chan respVal) { + go func(clusterId string, docs []*index.Document, respChan chan respVal) { count, err := indexerClients[clusterId].IndexDocument(docs) wg.Done() respChan <- respVal{ diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index e44c681..dec3163 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -24,10 +24,11 @@ import ( "time" "github.com/blevesearch/bleve" + "github.com/golang/protobuf/proto" "github.com/gorilla/mux" "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -127,7 +128,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) - fields, err := h.client.GetDocument(vars["id"]) + doc, err := h.client.GetDocument(vars["id"]) if err != nil { switch err { case errors.ErrNotFound: @@ -150,8 +151,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - // map[string]interface{} -> bytes - content, err = json.MarshalIndent(fields, "", " ") + content, err = index.MarshalDocument(doc) if err != nil { status = http.StatusInternalServerError @@ -192,7 +192,7 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer blasthttp.RecordMetrics(start, status, w, r) // create documents - docs := 
make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) vars := mux.Vars(r) id := vars["id"] @@ -233,7 +233,9 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if err == io.EOF || err == io.ErrClosedPipe { if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + var doc *index.Document + err = proto.Unmarshal(bodyBytes, doc) + //doc, err := indexutils.NewDocumentFromBytes(docBytes) if err != nil { status = http.StatusBadRequest @@ -271,7 +273,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { status = http.StatusBadRequest @@ -292,7 +295,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } } else { - doc, err := indexutils.NewDocumentFromBytes(bodyBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(bodyBytes, doc) if err != nil { status = http.StatusBadRequest @@ -312,8 +316,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { docs = append(docs, doc) } } else { - var fields map[string]interface{} - err = json.Unmarshal(bodyBytes, &fields) + var fieldsMap map[string]interface{} + err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) if err != nil { status = http.StatusBadRequest @@ -330,8 +334,11 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } - - doc, err := indexutils.NewDocument(id, fields) + docMap := map[string]interface{}{ + "id": id, + "fields": fieldsMap, + } + docBytes, err := json.Marshal(docMap) if err != nil { status = http.StatusBadRequest @@ -348,7 +355,24 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } + doc := 
&index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } docs = append(docs, doc) } diff --git a/go.mod b/go.mod index c987c7c..9edf824 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect + github.com/gogo/protobuf v1.1.1 github.com/golang/protobuf v1.3.1 github.com/google/go-cmp v0.3.0 github.com/gorilla/mux v1.7.0 diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index 4759e19..0d9fb3d 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -23,7 +23,6 @@ import ( "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" @@ -181,7 +180,7 @@ func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (index.Index_ClusterW return watchClient, nil } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { +func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { req := &index.GetDocumentRequest{ Id: id, } @@ -198,10 +197,7 @@ func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (map[string } } - ins, err := protobuf.MarshalAny(resp.Fields) - fields := *ins.(*map[string]interface{}) - - return fields, nil + return resp.Document, nil } func (c *GRPCClient) Search(searchRequest 
*bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { @@ -238,7 +234,7 @@ func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.Cal return searchResult, nil } -func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.CallOption) (int, error) { +func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { stream, err := c.client.IndexDocument(c.ctx, opts...) if err != nil { st, _ := status.FromError(err) @@ -247,18 +243,10 @@ func (c *GRPCClient) IndexDocument(docs []*indexutils.Document, opts ...grpc.Cal } for _, doc := range docs { - id := doc.Id - fields := doc.Fields - - fieldsAny := &any.Any{} - err := protobuf.UnmarshalAny(&fields, fieldsAny) - if err != nil { - return -1, err - } - req := &index.IndexDocumentRequest{ - Id: id, - Fields: fieldsAny, + Document: doc, + //Id: id, + //Fields: fieldsAny, } err = stream.Send(req) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 2613c56..2161fda 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -23,15 +23,13 @@ import ( "sync" "time" - "github.com/blevesearch/bleve/mapping" - "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" "github.com/google/go-cmp/cmp" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" @@ -775,14 +773,25 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentReq } } - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fields, fieldsAny) + docMap := map[string]interface{}{ + "id": req.Id, + "fields": fields, + } + + docBytes, err := json.Marshal(docMap) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", req.Id)) + 
return resp, status.Error(codes.Internal, err.Error()) + } + + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", req.Id)) return resp, status.Error(codes.Internal, err.Error()) } - resp.Fields = fieldsAny + resp.Document = doc return resp, nil } @@ -815,7 +824,7 @@ func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*in } func (s *GRPCService) IndexDocument(stream index.Index_IndexDocumentServer) error { - docs := make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) for { req, err := stream.Recv() @@ -828,22 +837,7 @@ func (s *GRPCService) IndexDocument(stream index.Index_IndexDocumentServer) erro return status.Error(codes.Internal, err.Error()) } - // fields - ins, err := protobuf.MarshalAny(req.Fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - fields := *ins.(*map[string]interface{}) - - // document - doc, err := indexutils.NewDocument(req.Id, fields) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - docs = append(docs, doc) + docs = append(docs, req.Document) } // index diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 697cc2e..6f7688a 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -27,7 +27,7 @@ import ( "github.com/gorilla/mux" blasterrors "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -129,7 +129,7 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { id := vars["id"] - fields, err := h.client.GetDocument(id) + doc, err := h.client.GetDocument(id) if err != nil { switch err { case blasterrors.ErrNotFound: @@ -152,8 +152,7 @@ func (h 
*GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - // map[string]interface{} -> bytes - content, err = blasthttp.NewJSONMessage(fields) + content, err = index.MarshalDocument(doc) if err != nil { status = http.StatusInternalServerError @@ -194,7 +193,7 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer blasthttp.RecordMetrics(start, status, w, r) // create documents - docs := make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) vars := mux.Vars(r) id := vars["id"] @@ -235,7 +234,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if err == io.EOF || err == io.ErrClosedPipe { if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(bodyBytes, doc) if err != nil { status = http.StatusBadRequest @@ -273,7 +273,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if len(docBytes) > 0 { - doc, err := indexutils.NewDocumentFromBytes(docBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) if err != nil { status = http.StatusBadRequest @@ -294,7 +295,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } } else { - doc, err := indexutils.NewDocumentFromBytes(bodyBytes) + doc := &index.Document{} + err = index.UnmarshalDocument(bodyBytes, doc) if err != nil { status = http.StatusBadRequest @@ -314,8 +316,8 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { docs = append(docs, doc) } } else { - var fields map[string]interface{} - err = json.Unmarshal(bodyBytes, &fields) + var fieldsMap map[string]interface{} + err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) if err != nil { status = http.StatusBadRequest @@ -332,8 +334,11 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } - - 
doc, err := indexutils.NewDocument(id, fields) + docMap := map[string]interface{}{ + "id": id, + "fields": fieldsMap, + } + docBytes, err := json.Marshal(docMap) if err != nil { status = http.StatusBadRequest @@ -350,7 +355,24 @@ func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) return } + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + status = http.StatusBadRequest + + msgMap := map[string]interface{}{ + "message": err.Error(), + "status": status, + } + content, err = blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) + return + } docs = append(docs, doc) } diff --git a/indexer/index.go b/indexer/index.go index accc520..2c8a031 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -137,12 +137,8 @@ func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error return result, nil } -func (i *Index) Index(id string, fields map[string]interface{}) error { - doc := map[string]interface{}{ - "id": id, - "fields": fields, - } - _, err := i.BulkIndex([]map[string]interface{}{doc}) +func (i *Index) Index(doc *index.Document) error { + _, err := i.BulkIndex([]*index.Document{doc}) if err != nil { i.logger.Error(err.Error()) return err @@ -151,23 +147,18 @@ func (i *Index) Index(id string, fields map[string]interface{}) error { return nil } -func (i *Index) BulkIndex(docs []map[string]interface{}) (int, error) { +func (i *Index) BulkIndex(docs []*index.Document) (int, error) { batch := i.index.NewBatch() count := 0 for _, doc := range docs { - id, ok := doc["id"].(string) - if !ok { - i.logger.Error("missing document id") - continue - } - fields, ok := doc["fields"].(map[string]interface{}) - if !ok { - i.logger.Error("missing document fields") + fieldsIntr, err := protobuf.MarshalAny(doc.Fields) + if err != nil { + 
i.logger.Error(err.Error(), zap.Any("doc", doc)) continue } - err := batch.Index(id, fields) + err = batch.Index(doc.Id, *fieldsIntr.(*map[string]interface{})) if err != nil { i.logger.Error(err.Error()) continue diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index a12c541..95590fc 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -21,13 +21,11 @@ import ( "io/ioutil" "sync" - "github.com/blevesearch/bleve/mapping" - "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) @@ -130,8 +128,8 @@ func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (f *RaftFSM) IndexDocument(id string, fields map[string]interface{}) error { - err := f.index.Index(id, fields) +func (f *RaftFSM) IndexDocument(doc *index.Document) error { + err := f.index.Index(doc) if err != nil { f.logger.Error(err.Error()) return err @@ -140,7 +138,7 @@ func (f *RaftFSM) IndexDocument(id string, fields map[string]interface{}) error return nil } -func (f *RaftFSM) IndexDocuments(docs []map[string]interface{}) (int, error) { +func (f *RaftFSM) IndexDocuments(docs []*index.Document) (int, error) { count, err := f.index.BulkIndex(docs) if err != nil { f.logger.Error(err.Error()) @@ -244,7 +242,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { err = f.DeleteNode(data["id"].(string)) return &fsmResponse{error: err} case indexDocument: - var data []map[string]interface{} + var data []*index.Document err := json.Unmarshal(msg.Data, &data) if err != nil { f.logger.Error(err.Error()) @@ -307,18 +305,7 @@ func (f *RaftFSM) Restore(rc io.ReadCloser) error { continue } - fields, err := protobuf.MarshalAny(doc.Fields) - if err != nil { - f.logger.Error(err.Error()) - continue - } - if fields == nil { - f.logger.Error("value 
is nil") - continue - } - fieldsMap := *fields.(*map[string]interface{}) - - err = f.index.Index(doc.Id, fieldsMap) + err = f.index.Index(doc) if err != nil { f.logger.Error(err.Error()) continue diff --git a/indexer/raft_server.go b/indexer/raft_server.go index c3ccf73..0903e67 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -30,7 +30,6 @@ import ( raftbadgerdb "github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" @@ -552,7 +551,7 @@ func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, return result, nil } -func (s *RaftServer) IndexDocument(docs []*indexutils.Document) (int, error) { +func (s *RaftServer) IndexDocument(docs []*index.Document) (int, error) { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader diff --git a/indexer/server_test.go b/indexer/server_test.go index 0cd7957..dd2f7b1 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -765,7 +765,7 @@ func TestServer_PutDocument(t *testing.T) { } // put document - docs := make([]*indexutils.Document, 0) + docs := make([]*index.Document, 0) docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file docFile1, err := os.Open(docPath1) @@ -779,12 +779,8 @@ func TestServer_PutDocument(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - var docFields1 map[string]interface{} - err = json.Unmarshal(docBytes1, &docFields1) - if err != nil { - t.Fatalf("%v", err) - } - doc1, err := indexutils.NewDocument("doc1", docFields1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } @@ -868,7 +864,7 @@ func TestServer_GetDocument(t *testing.T) { } // put document - putDocs 
:= make([]*indexutils.Document, 0) + putDocs := make([]*index.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := os.Open(putDocPath1) @@ -882,7 +878,8 @@ func TestServer_GetDocument(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - putDoc1, err := indexutils.NewDocumentFromBytes(putDocBytes1) + putDoc1 := &index.Document{} + err = index.UnmarshalDocument(putDocBytes1, putDoc1) if err != nil { t.Fatalf("%v", err) } @@ -900,14 +897,14 @@ func TestServer_GetDocument(t *testing.T) { } // get document - getDocFields1, err := client.GetDocument("enwiki_1") + getDoc1, err := client.GetDocument("enwiki_1") if err != nil { t.Fatalf("%v", err) } - expGetDocFields1 := putDoc1.Fields - actGetDocFields1 := getDocFields1 - if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Fatalf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) + expGetDoc1, _ := index.MarshalDocument(putDoc1) + actGetDoc1, _ := index.MarshalDocument(getDoc1) + if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { + t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) } // get non-existing document @@ -986,7 +983,7 @@ func TestServer_DeleteDocument(t *testing.T) { } // put document - putDocs := make([]*indexutils.Document, 0) + putDocs := make([]*index.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := os.Open(putDocPath1) @@ -1000,7 +997,8 @@ func TestServer_DeleteDocument(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - putDoc1, err := indexutils.NewDocumentFromBytes(putDocBytes1) + putDoc1 := &index.Document{} + err = index.UnmarshalDocument(putDocBytes1, putDoc1) if err != nil { t.Fatalf("%v", err) } @@ -1018,23 +1016,23 @@ func TestServer_DeleteDocument(t *testing.T) { } // get document - getDocFields1, err := client.GetDocument("enwiki_1") + getDoc1, err := 
client.GetDocument("enwiki_1") if err != nil { t.Fatalf("%v", err) } - expGetDocFields1 := putDoc1.Fields - actGetDocFields1 := getDocFields1 - if !reflect.DeepEqual(expGetDocFields1, actGetDocFields1) { - t.Fatalf("expected content to see %v, saw %v", expGetDocFields1, actGetDocFields1) + expGetDoc1, _ := index.MarshalDocument(putDoc1) + actGetDoc1, _ := index.MarshalDocument(getDoc1) + if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { + t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) } // get non-existing document - getDocFields2, err := client.GetDocument("non-existing") + getDoc2, err := client.GetDocument("non-existing") if err != errors.ErrNotFound { t.Fatalf("%v", err) } - if getDocFields2 != nil { - t.Fatalf("expected content to see nil, saw %v", getDocFields2) + if getDoc2 != nil { + t.Fatalf("expected content to see nil, saw %v", getDoc2) } // delete document @@ -1049,21 +1047,21 @@ func TestServer_DeleteDocument(t *testing.T) { } // get document - getDocFields1, err = client.GetDocument("enwiki_1") + getDoc1, err = client.GetDocument("enwiki_1") if err != errors.ErrNotFound { t.Fatalf("%v", err) } - if getDocFields1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDocFields1) + if getDoc1 != nil { + t.Fatalf("expected content to see nil, saw %v", getDoc1) } // delete non-existing document - getDocFields1, err = client.GetDocument("non-existing") + getDoc1, err = client.GetDocument("non-existing") if err != errors.ErrNotFound { t.Fatalf("%v", err) } - if getDocFields1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDocFields1) + if getDoc1 != nil { + t.Fatalf("expected content to see nil, saw %v", getDoc1) } } @@ -1133,7 +1131,7 @@ func TestServer_Search(t *testing.T) { } // put document - putDocs := make([]*indexutils.Document, 0) + putDocs := make([]*index.Document, 0) putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") // read index mapping file putDocFile1, err := 
os.Open(putDocPath1) @@ -1147,12 +1145,8 @@ func TestServer_Search(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - var putDocFields1 map[string]interface{} - err = json.Unmarshal(putDocBytes1, &putDocFields1) - if err != nil { - t.Fatalf("%v", err) - } - putDoc1, err := indexutils.NewDocument("doc1", putDocFields1) + putDoc1 := &index.Document{} + err = index.UnmarshalDocument(putDocBytes1, putDoc1) if err != nil { t.Fatalf("%v", err) } diff --git a/indexutils/document.go b/indexutils/document.go deleted file mode 100644 index b550647..0000000 --- a/indexutils/document.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexutils - -import ( - "encoding/json" - "errors" -) - -type Document struct { - Id string `json:"id,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` -} - -func NewDocument(id string, fields map[string]interface{}) (*Document, error) { - doc := &Document{ - Id: id, - Fields: fields, - } - - if err := doc.Validate(); err != nil { - return nil, err - } - - return doc, nil -} - -func NewDocumentFromBytes(src []byte) (*Document, error) { - var doc *Document - - err := json.Unmarshal(src, &doc) - if err != nil { - return nil, err - } - - if err := doc.Validate(); err != nil { - return nil, err - } - - return doc, nil -} - -func (d *Document) Validate() error { - if d.Id == "" { - return errors.New("id is empty") - } - - if len(d.Fields) <= 0 { - return errors.New("fields are empty") - } - - return nil -} diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go index 9a8174f..d37e934 100644 --- a/protobuf/distribute/distribute.pb.go +++ b/protobuf/distribute/distribute.pb.go @@ -8,6 +8,7 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + index "github.com/mosuka/blast/protobuf/index" grpc "google.golang.org/grpc" math "math" ) @@ -206,10 +207,10 @@ func (m *GetDocumentRequest) GetId() string { } type GetDocumentResponse struct { - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } @@ -237,19 +238,18 @@ func (m *GetDocumentResponse) XXX_DiscardUnknown() { var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo -func (m 
*GetDocumentResponse) GetFields() *any.Any { +func (m *GetDocumentResponse) GetDocument() *index.Document { if m != nil { - return m.Fields + return m.Document } return nil } type IndexDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } @@ -277,16 +277,9 @@ func (m *IndexDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo -func (m *IndexDocumentRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexDocumentRequest) GetFields() *any.Any { +func (m *IndexDocumentRequest) GetDocument() *index.Document { if m != nil { - return m.Fields + return m.Document } return nil } @@ -506,42 +499,42 @@ func init() { } var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 547 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xef, 0x6f, 0xd2, 0x40, - 0x18, 0xa6, 0x6c, 0xc5, 0xf1, 0x32, 0x18, 0x39, 0xd9, 0x74, 0xfd, 0xa0, 0xf3, 0x5c, 0x22, 0x46, - 0x57, 0x12, 0x8c, 0x1f, 0x8c, 0x89, 0xa6, 0xae, 0x44, 0x88, 0x04, 0x97, 0x82, 0xc6, 0xa9, 0xc9, - 0xd2, 0x1f, 0x37, 0x68, 0x56, 0x7a, 0xd8, 0xbb, 0x26, 0xee, 0xaf, 0xf0, 0x3f, 0xf1, 0xa3, 0x7f, - 0x9f, 0x69, 0x8f, 0x62, 0xcb, 0x6a, 0xd9, 0x37, 0xde, 0xf7, 0x7d, 0x9e, 0xe7, 0x9e, 0xbb, 0xf7, - 0xa1, 0x70, 0xbc, 0x08, 0x28, 0xa7, 0x56, 0x78, 0xd9, 0x71, 0x5c, 0xc6, 0x03, 0xd7, 0x0a, 0x39, - 0x49, 0xfd, 0x54, 0xe3, 0x31, 0x82, 0x7f, 0x1d, 0xe5, 0x70, 0x4a, 0xe9, 0xd4, 0x23, 
0x9d, 0x15, - 0xd1, 0xf4, 0xaf, 0x05, 0x0c, 0xff, 0x92, 0xe0, 0x60, 0x44, 0x1d, 0xd2, 0x27, 0xa6, 0xc7, 0x67, - 0xa7, 0x33, 0x62, 0x5f, 0x19, 0xe4, 0x47, 0x48, 0x18, 0x47, 0x6f, 0x40, 0x5e, 0x04, 0xd4, 0x22, - 0xf7, 0xa5, 0x23, 0xa9, 0xdd, 0xe8, 0xb6, 0xd5, 0xd4, 0x19, 0xf9, 0x14, 0xf5, 0x2c, 0xc2, 0x1b, - 0x82, 0x86, 0x5f, 0x82, 0x1c, 0xd7, 0x68, 0x0f, 0x6a, 0xfd, 0x9e, 0x36, 0x9c, 0xf4, 0x07, 0xa3, - 0xde, 0x78, 0xdc, 0x2c, 0xa1, 0x5d, 0xd8, 0x19, 0x0e, 0x3e, 0xf7, 0xe2, 0x4a, 0x42, 0x75, 0xa8, - 0x1a, 0x3d, 0x4d, 0x17, 0xc3, 0x32, 0xfe, 0x2d, 0xc1, 0xbd, 0x1b, 0xf2, 0x6c, 0x41, 0x7d, 0x46, - 0xd0, 0x5b, 0x90, 0x19, 0x37, 0x79, 0x62, 0xe9, 0x69, 0xa1, 0x25, 0xc1, 0x51, 0xc7, 0x11, 0xc1, - 0x10, 0x3c, 0x6c, 0x80, 0x1c, 0xd7, 0xa8, 0x06, 0x77, 0x84, 0xa7, 0xf3, 0x66, 0x29, 0x72, 0xf0, - 0x69, 0x94, 0x94, 0x12, 0xaa, 0x82, 0xac, 0x45, 0xfe, 0x9a, 0x65, 0xb4, 0x03, 0xdb, 0x7a, 0x4f, - 0xd3, 0x9b, 0x5b, 0x51, 0x33, 0x72, 0x79, 0xde, 0xdc, 0x8e, 0xe0, 0xa3, 0x8f, 0x93, 0x0b, 0x51, - 0xca, 0xf8, 0x18, 0xd0, 0x7b, 0xc2, 0x75, 0x6a, 0x87, 0x73, 0xe2, 0xf3, 0xe4, 0xf5, 0x1a, 0x50, - 0x76, 0x9d, 0xd8, 0x67, 0xd5, 0x28, 0xbb, 0x0e, 0x3e, 0x85, 0xbb, 0x19, 0xd4, 0xf2, 0x46, 0xcf, - 0xa1, 0x72, 0xe9, 0x12, 0xcf, 0x61, 0x31, 0xb4, 0xd6, 0x6d, 0xa9, 0x62, 0x57, 0x6a, 0xb2, 0x2b, - 0x55, 0xf3, 0xaf, 0x8d, 0x25, 0x06, 0x4f, 0xa0, 0x35, 0xf0, 0x1d, 0xf2, 0x73, 0xc3, 0x61, 0x29, - 0xd5, 0xf2, 0x2d, 0x54, 0x4f, 0x60, 0x7f, 0x4d, 0x75, 0x69, 0xae, 0x05, 0xb2, 0x4d, 0x43, 0x9f, - 0xc7, 0xca, 0xb2, 0x21, 0x0a, 0xfc, 0x04, 0xf6, 0x75, 0xe2, 0x11, 0x4e, 0x36, 0x5d, 0x59, 0x85, - 0x83, 0x75, 0x60, 0xa1, 0xf0, 0x10, 0xea, 0x63, 0x62, 0x06, 0xf6, 0x2c, 0x11, 0x7c, 0x0d, 0x0d, - 0x16, 0x37, 0x2e, 0x02, 0xd1, 0x29, 0x7c, 0xa4, 0x3a, 0x4b, 0x93, 0xf1, 0x07, 0x68, 0x24, 0x6a, - 0xcb, 0x53, 0x5f, 0x41, 0x7d, 0x25, 0xc7, 0x42, 0xaf, 0x58, 0x6d, 0x37, 0x51, 0x8b, 0x90, 0xdd, - 0x3f, 0x5b, 0x00, 0xfa, 0x2a, 0x6b, 0xe8, 0x3b, 0xec, 0xad, 0xc5, 0x0d, 0xe1, 0xcd, 0x7f, 0x0f, - 0xe5, 0xf1, 0x2d, 0xf2, 
0x8a, 0x4b, 0xe8, 0x0c, 0x6a, 0xa9, 0xa8, 0xa0, 0x07, 0x69, 0xd6, 0xcd, - 0xa4, 0x29, 0x0f, 0xff, 0x3b, 0x5f, 0x29, 0x7e, 0x81, 0x7a, 0x66, 0xc3, 0xe8, 0x28, 0xcd, 0xc9, - 0x8b, 0x94, 0xf2, 0xa8, 0x00, 0x91, 0xe8, 0xb6, 0x25, 0xf4, 0x0d, 0x1a, 0xd9, 0x1d, 0xa3, 0x0c, - 0x31, 0x37, 0x28, 0x0a, 0x2e, 0x82, 0xa4, 0xc4, 0x35, 0xa8, 0x88, 0x15, 0xa2, 0xc3, 0x34, 0x23, - 0x13, 0x12, 0x45, 0xc9, 0x1b, 0x25, 0x22, 0xef, 0x4e, 0xbe, 0x3e, 0x9b, 0xba, 0x7c, 0x16, 0x5a, - 0xaa, 0x4d, 0xe7, 0x9d, 0x39, 0x65, 0xe1, 0x95, 0xd9, 0xb1, 0x3c, 0x93, 0xf1, 0x4e, 0xce, 0x67, - 0xd4, 0xaa, 0xc4, 0xcd, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x35, 0xcf, 0xa0, 0x84, 0x64, - 0x05, 0x00, 0x00, + // 556 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x5d, 0xba, 0x65, 0xb4, 0xb7, 0xeb, 0x87, 0x4c, 0x37, 0x58, 0x1e, 0x60, 0x98, 0x49, 0x14, + 0x4d, 0x73, 0xa5, 0x22, 0x1e, 0x10, 0x12, 0x28, 0x5b, 0x2a, 0x5a, 0x51, 0x95, 0x29, 0x1d, 0x88, + 0x01, 0xd2, 0x94, 0x34, 0xa6, 0x8d, 0xd6, 0xd6, 0x25, 0x76, 0x24, 0xf6, 0x2b, 0xf8, 0x27, 0x3c, + 0xf2, 0xfb, 0x50, 0xe2, 0x24, 0x24, 0x5d, 0xc8, 0xe0, 0xa5, 0xea, 0xbd, 0xf7, 0x9c, 0xe3, 0xe3, + 0xdc, 0x93, 0xc0, 0xe1, 0xca, 0x63, 0x82, 0xd9, 0xfe, 0xd7, 0x8e, 0xe3, 0x72, 0xe1, 0xb9, 0xb6, + 0x2f, 0x68, 0xea, 0x2f, 0x09, 0xc7, 0x08, 0xfe, 0x74, 0xb4, 0xfd, 0x29, 0x63, 0xd3, 0x39, 0xed, + 0x24, 0x44, 0x6b, 0x79, 0x2d, 0x61, 0x9a, 0x96, 0xf4, 0xdc, 0xa5, 0x43, 0xbf, 0xcb, 0x5f, 0x39, + 0xc3, 0x3f, 0x14, 0xd8, 0x1b, 0x31, 0x87, 0xf6, 0xa9, 0x35, 0x17, 0xb3, 0xd3, 0x19, 0x9d, 0x5c, + 0x99, 0xf4, 0x9b, 0x4f, 0xb9, 0x40, 0xaf, 0x40, 0x5d, 0x79, 0xcc, 0xa6, 0xf7, 0x95, 0x03, 0xa5, + 0x5d, 0xef, 0xb6, 0x49, 0xea, 0xfc, 0x7c, 0x0a, 0x39, 0x0b, 0xf0, 0xa6, 0xa4, 0xe1, 0xe7, 0xa0, + 0x86, 0x35, 0x6a, 0x40, 0xb5, 0xdf, 0xd3, 0x87, 0xe7, 0xfd, 0xc1, 0xa8, 0x37, 0x1e, 0x37, 0x37, + 0xd0, 0x0e, 0x94, 0x87, 0x83, 0x0f, 0xbd, 0xb0, 0x52, 0x50, 0x0d, 0x2a, 0x66, 0x4f, 0x37, 
0xe4, + 0xb0, 0x84, 0x7f, 0x2a, 0x70, 0xef, 0x86, 0x3c, 0x5f, 0xb1, 0x25, 0xa7, 0xe8, 0x35, 0xa8, 0x5c, + 0x58, 0x22, 0xb6, 0xf4, 0xb4, 0xd0, 0x92, 0xe4, 0x90, 0x71, 0x40, 0x30, 0x25, 0x0f, 0x9b, 0xa0, + 0x86, 0x35, 0xaa, 0xc2, 0x1d, 0xe9, 0xe9, 0xa2, 0xb9, 0x11, 0x38, 0x78, 0x3f, 0x8a, 0x4b, 0x05, + 0x55, 0x40, 0xd5, 0x03, 0x7f, 0xcd, 0x12, 0x2a, 0xc3, 0x96, 0xd1, 0xd3, 0x8d, 0xe6, 0x66, 0xd0, + 0x0c, 0x5c, 0x5e, 0x34, 0xb7, 0x02, 0xf8, 0xe8, 0xdd, 0xf9, 0xa5, 0x2c, 0x55, 0x7c, 0x08, 0xe8, + 0x0d, 0x15, 0x06, 0x9b, 0xf8, 0x0b, 0xba, 0x14, 0xf1, 0xd3, 0xab, 0x43, 0xc9, 0x75, 0x42, 0x9f, + 0x15, 0xb3, 0xe4, 0x3a, 0xf8, 0x04, 0xee, 0x66, 0x50, 0xd1, 0x8d, 0x8e, 0xa0, 0xec, 0x44, 0xbd, + 0x10, 0x5c, 0xed, 0x36, 0x88, 0xdc, 0x4f, 0x02, 0x4d, 0x00, 0xf8, 0x14, 0x5a, 0x83, 0x60, 0xb6, + 0x7e, 0xd6, 0x7f, 0x89, 0x1c, 0xc3, 0xee, 0x9a, 0x48, 0x64, 0xa5, 0x05, 0xea, 0x84, 0xf9, 0x91, + 0x84, 0x6a, 0xca, 0x02, 0x3f, 0x81, 0x5d, 0x83, 0xce, 0xa9, 0xa0, 0xb7, 0x5d, 0x90, 0xc0, 0xde, + 0x3a, 0xb0, 0x50, 0x78, 0x08, 0xb5, 0x31, 0xb5, 0xbc, 0xc9, 0x2c, 0x16, 0x7c, 0x09, 0x75, 0x1e, + 0x36, 0x2e, 0x3d, 0xd9, 0x89, 0xee, 0xd2, 0x22, 0x32, 0xda, 0x24, 0x8e, 0x31, 0xd1, 0x97, 0xd7, + 0x66, 0x8d, 0xa7, 0xc9, 0xf8, 0x2d, 0xd4, 0x63, 0xb5, 0xe8, 0xd4, 0x17, 0x50, 0x4b, 0xe4, 0xb8, + 0x3f, 0x2f, 0x56, 0xdb, 0x89, 0xd5, 0x02, 0x64, 0xf7, 0xd7, 0x26, 0x80, 0x91, 0x24, 0x0b, 0x7d, + 0x81, 0xc6, 0x5a, 0xb8, 0x10, 0xbe, 0xfd, 0x65, 0xd0, 0x1e, 0xff, 0x43, 0x3a, 0xf1, 0x06, 0x3a, + 0x83, 0x6a, 0x2a, 0x18, 0xe8, 0x41, 0x9a, 0x75, 0x33, 0x57, 0xda, 0xc3, 0xbf, 0xce, 0x13, 0xc5, + 0x8f, 0x50, 0xcb, 0x6c, 0x18, 0x1d, 0xa4, 0x39, 0x79, 0x09, 0xd2, 0x1e, 0x15, 0x20, 0x62, 0xdd, + 0xb6, 0x82, 0x3e, 0x43, 0x3d, 0xbb, 0x63, 0x94, 0x21, 0xe6, 0x06, 0x45, 0xc3, 0x45, 0x90, 0x94, + 0xb8, 0x0e, 0xdb, 0x72, 0x85, 0x68, 0x3f, 0xcd, 0xc8, 0x84, 0x44, 0xd3, 0xf2, 0x46, 0xb1, 0xc8, + 0xc9, 0xf1, 0xa7, 0xa3, 0xa9, 0x2b, 0x66, 0xbe, 0x4d, 0x26, 0x6c, 0xd1, 0x59, 0x30, 0xee, 0x5f, + 0x59, 0x1d, 0x7b, 0x6e, 0x71, 
0xd1, 0xc9, 0xf9, 0xa0, 0xda, 0xdb, 0x61, 0xf3, 0xd9, 0xef, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xd2, 0x53, 0x25, 0x64, 0x6e, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto index 7f08a56..53d49d5 100644 --- a/protobuf/distribute/distribute.proto +++ b/protobuf/distribute/distribute.proto @@ -15,6 +15,7 @@ syntax = "proto3"; import "google/protobuf/any.proto"; +import "protobuf/index/index.proto"; package distribute; @@ -55,12 +56,11 @@ message GetDocumentRequest { } message GetDocumentResponse { - google.protobuf.Any fields = 1; + index.Document document = 1; } message IndexDocumentRequest { - string id = 1; - google.protobuf.Any fields = 2; + index.Document document = 1; } message IndexDocumentResponse { diff --git a/protobuf/index/index.go b/protobuf/index/index.go new file mode 100644 index 0000000..fd80b99 --- /dev/null +++ b/protobuf/index/index.go @@ -0,0 +1,62 @@ +package index + +import ( + "encoding/json" + "errors" + + "github.com/golang/protobuf/ptypes/any" + + "github.com/mosuka/blast/protobuf" +) + +func MarshalDocument(doc *Document) ([]byte, error) { + if doc == nil { + return nil, errors.New("nil") + } + + fieldsIntr, err := protobuf.MarshalAny(doc.Fields) + if err != nil { + return nil, err + } + + docMap := map[string]interface{}{ + "id": doc.Id, + "fields": *fieldsIntr.(*map[string]interface{}), + } + + docBytes, err := json.Marshal(docMap) + if err != nil { + return nil, err + } + + return docBytes, nil +} + +func UnmarshalDocument(data []byte, doc *Document) error { + var err error + + if data == nil || len(data) <= 0 || doc == nil { + return nil + } + + var docMap map[string]interface{} + err = json.Unmarshal(data, &docMap) + if err != nil { + return err + } + + if id, ok := docMap["id"].(string); ok { + doc.Id = id + } + + if fieldsMap, ok := docMap["fields"].(map[string]interface{}); ok { + fieldsAny := &any.Any{} 
+ err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) + if err != nil { + return err + } + doc.Fields = fieldsAny + } + + return nil +} diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index 56cf8d5..f37f30a 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -592,6 +592,53 @@ func (m *ClusterWatchResponse) GetCluster() *Cluster { return nil } +type Document struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{10} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Document) GetFields() *any.Any { + if m != nil { + return m.Fields + } + return nil +} + type GetDocumentRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -603,7 +650,7 @@ func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } func (m *GetDocumentRequest) String() string { 
return proto.CompactTextString(m) } func (*GetDocumentRequest) ProtoMessage() {} func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{10} + return fileDescriptor_7b2daf652facb3ae, []int{11} } func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -632,17 +679,17 @@ func (m *GetDocumentRequest) GetId() string { } type GetDocumentResponse struct { - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } func (*GetDocumentResponse) ProtoMessage() {} func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} + return fileDescriptor_7b2daf652facb3ae, []int{12} } func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -663,26 +710,25 @@ func (m *GetDocumentResponse) XXX_DiscardUnknown() { var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo -func (m *GetDocumentResponse) GetFields() *any.Any { +func (m *GetDocumentResponse) GetDocument() *Document { if m != nil { - return m.Fields + return m.Document } return nil } type IndexDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } func (*IndexDocumentRequest) ProtoMessage() {} func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} + return fileDescriptor_7b2daf652facb3ae, []int{13} } func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -703,16 +749,9 @@ func (m *IndexDocumentRequest) XXX_DiscardUnknown() { var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo -func (m *IndexDocumentRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexDocumentRequest) GetFields() *any.Any { +func (m *IndexDocumentRequest) GetDocument() *Document { if m != nil { - return m.Fields + return m.Document } return nil } @@ -728,7 +767,7 @@ func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } func (*IndexDocumentResponse) ProtoMessage() {} func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} + return fileDescriptor_7b2daf652facb3ae, []int{14} } func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -767,7 +806,7 @@ func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentRequest) ProtoMessage() {} func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{14} + return fileDescriptor_7b2daf652facb3ae, []int{15} } func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -806,7 +845,7 @@ func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} func (m *DeleteDocumentResponse) String() string { 
return proto.CompactTextString(m) } func (*DeleteDocumentResponse) ProtoMessage() {} func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} + return fileDescriptor_7b2daf652facb3ae, []int{16} } func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -845,7 +884,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} + return fileDescriptor_7b2daf652facb3ae, []int{17} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -884,7 +923,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{18} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -925,7 +964,7 @@ func (m *IndexConfig) Reset() { *m = IndexConfig{} } func (m *IndexConfig) String() string { return proto.CompactTextString(m) } func (*IndexConfig) ProtoMessage() {} func (*IndexConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *IndexConfig) XXX_Unmarshal(b []byte) error { @@ -978,7 +1017,7 @@ func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) 
error { @@ -1017,7 +1056,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{20} + return fileDescriptor_7b2daf652facb3ae, []int{21} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -1045,54 +1084,6 @@ func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { return nil } -// use for creating snapshot -type Document struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{21} -} - -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} - -var xxx_messageInfo_Document proto.InternalMessageInfo - -func (m *Document) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Document) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - func init() { proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", 
NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) @@ -1109,6 +1100,7 @@ func init() { proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") + proto.RegisterType((*Document)(nil), "index.Document") proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") @@ -1120,84 +1112,84 @@ func init() { proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") - proto.RegisterType((*Document)(nil), "index.Document") } func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x5f, 0x73, 0xda, 0xc6, - 0x17, 0x45, 0x80, 0x6c, 0x7c, 0x65, 0x08, 0xbf, 0x8d, 0xed, 0x24, 0x24, 0xfe, 0x35, 0xde, 0xa6, - 0x0d, 0x33, 0x6d, 0xa1, 0xe3, 0x8c, 0x27, 0x4d, 0xda, 0x4e, 0x87, 0x80, 0x62, 0x13, 0x13, 0xe1, - 0x0a, 0x1c, 0x4f, 0xfa, 0xe2, 0x11, 0xb0, 0x06, 0x8d, 0x41, 0x52, 0xd1, 0xe2, 0x29, 0x8f, 0x7d, - 0xed, 0x7b, 0xbf, 0x43, 0xfb, 0x71, 0xfa, 0xde, 0x0f, 0xd3, 0xd9, 0x3f, 0x92, 0x25, 0xd9, 0xc8, - 0x9d, 0xe9, 0x8b, 0xc7, 0x7b, 0xef, 0xb9, 0x67, 0xcf, 0xde, 0xbd, 0x7b, 0x64, 0x43, 0xc5, 0x9b, - 0xbb, 0xd4, 0x1d, 0x2c, 0x2e, 0xea, 0xb6, 0x33, 0x22, 0xbf, 0x88, 0x9f, 0x35, 0x1e, 0x44, 0x2a, - 0x5f, 0x54, 0x1e, 0x8d, 0x5d, 0x77, 
0x3c, 0x25, 0xf5, 0x10, 0x69, 0x39, 0x4b, 0x81, 0xa8, 0x3c, - 0x4e, 0xa6, 0xc8, 0xcc, 0xa3, 0x32, 0x89, 0x7f, 0x53, 0x60, 0xc7, 0x70, 0x47, 0xe4, 0x88, 0x58, - 0x53, 0x3a, 0x69, 0x4e, 0xc8, 0xf0, 0xd2, 0x24, 0x3f, 0x2f, 0x88, 0x4f, 0xd1, 0x2b, 0x50, 0xbd, - 0xb9, 0x3b, 0x20, 0x0f, 0x95, 0xa7, 0x4a, 0xb5, 0xb4, 0xff, 0x69, 0x4d, 0x6c, 0x7b, 0x3b, 0xba, - 0x76, 0xc2, 0xa0, 0xa6, 0xa8, 0xc0, 0x07, 0xa0, 0xf2, 0x35, 0xba, 0x07, 0xda, 0x91, 0xde, 0xe8, - 0xf4, 0x8f, 0xda, 0x86, 0xde, 0xeb, 0x95, 0x33, 0x68, 0x13, 0x0a, 0x9d, 0xf6, 0x07, 0x9d, 0xaf, - 0x14, 0x54, 0x84, 0x0d, 0x53, 0x6f, 0xb4, 0x44, 0x32, 0x8b, 0xff, 0x54, 0xe0, 0xc1, 0x0d, 0x7a, - 0xdf, 0x73, 0x1d, 0x9f, 0xa0, 0xd7, 0xa0, 0xfa, 0xd4, 0xa2, 0x81, 0x9a, 0x67, 0xab, 0xd4, 0x08, - 0x78, 0xad, 0xc7, 0xb0, 0xa6, 0x28, 0xc1, 0x26, 0xa8, 0x7c, 0x8d, 0x34, 0x58, 0x17, 0x72, 0x3e, - 0x96, 0x33, 0x6c, 0xf3, 0x53, 0x23, 0x58, 0x2a, 0x68, 0x03, 0xd4, 0x06, 0x93, 0x56, 0xce, 0xa2, - 0x02, 0xe4, 0x5b, 0x7a, 0xa3, 0x55, 0xce, 0xb1, 0x20, 0x13, 0xf8, 0xb1, 0x9c, 0x67, 0x70, 0xa3, - 0xdb, 0x3f, 0x17, 0x4b, 0x15, 0x9f, 0x40, 0xe1, 0x3d, 0xa1, 0xd6, 0xc8, 0xa2, 0x16, 0xda, 0x83, - 0xcd, 0xf1, 0xdc, 0x1b, 0x9e, 0x5b, 0xa3, 0xd1, 0x9c, 0xf8, 0x3e, 0x97, 0xb8, 0x61, 0x6a, 0x2c, - 0xd6, 0x10, 0x21, 0x06, 0x99, 0x50, 0xea, 0x85, 0x90, 0xac, 0x80, 0xb0, 0x98, 0x84, 0xe0, 0xbf, - 0x15, 0xc8, 0xb3, 0xe3, 0xa0, 0x12, 0x64, 0xed, 0x91, 0x24, 0xc9, 0xda, 0x23, 0x56, 0x3b, 0xb0, - 0x9d, 0x51, 0xb2, 0x96, 0xc5, 0x02, 0xfa, 0xe7, 0x41, 0x77, 0x72, 0xbc, 0x3b, 0xff, 0x8b, 0x74, - 0x27, 0xd6, 0x0a, 0xf4, 0x05, 0x14, 0x66, 0x52, 0xf6, 0xc3, 0xfc, 0x53, 0xa5, 0xaa, 0xed, 0xdf, - 0x93, 0xd8, 0xe0, 0x34, 0x66, 0x08, 0xc0, 0xc7, 0x91, 0xbe, 0x9d, 0x1a, 0xc7, 0x46, 0xf7, 0xcc, - 0x10, 0x57, 0xf8, 0xb6, 0xdb, 0xe9, 0x74, 0xcf, 0x74, 0x53, 0x5c, 0x61, 0xb3, 0x61, 0xb4, 0xda, - 0xad, 0x46, 0x9f, 0xb5, 0x0e, 0x60, 0xad, 0xa3, 0x37, 0x5a, 0xba, 0x59, 0xce, 0x31, 0x60, 0xef, - 0xe8, 0xb4, 0xdf, 0x62, 0x65, 0x79, 0xfc, 0xab, 0x02, 0xeb, 0xcd, 0xe9, 
0xc2, 0xa7, 0x64, 0x8e, - 0xea, 0xa0, 0x3a, 0xee, 0x88, 0xb0, 0x4e, 0xe5, 0xaa, 0xda, 0xfe, 0x23, 0x29, 0x41, 0xa6, 0xb9, - 0x6c, 0x5f, 0x77, 0xe8, 0x7c, 0x69, 0x0a, 0x5c, 0x45, 0x07, 0xb8, 0x0e, 0xa2, 0x32, 0xe4, 0x2e, - 0xc9, 0x52, 0x76, 0x88, 0xfd, 0x8a, 0xf6, 0x40, 0xbd, 0xb2, 0xa6, 0x0b, 0xc2, 0x7b, 0xa3, 0xed, - 0x6b, 0x91, 0xf3, 0x9b, 0x22, 0xf3, 0x3a, 0xfb, 0x8d, 0x82, 0x5f, 0x40, 0x99, 0x85, 0xda, 0xce, - 0x85, 0x1b, 0x0e, 0xd6, 0x27, 0x90, 0x67, 0x7b, 0x70, 0xb6, 0x44, 0x25, 0x4f, 0xe0, 0x03, 0x40, - 0x52, 0xd8, 0x3b, 0xd7, 0x76, 0x82, 0xd7, 0x71, 0x67, 0xd9, 0x67, 0x70, 0x5f, 0x96, 0x75, 0x88, - 0x75, 0x45, 0x82, 0xba, 0xc4, 0xe5, 0xe2, 0x1f, 0x42, 0x58, 0x4c, 0x55, 0x15, 0xd6, 0x87, 0x22, - 0x2c, 0x77, 0x28, 0xc5, 0x7b, 0x64, 0x06, 0x69, 0xfc, 0x97, 0x02, 0x5b, 0x32, 0x78, 0x66, 0xd1, - 0xe1, 0x24, 0xa4, 0x78, 0x09, 0x2a, 0xb9, 0x22, 0x0e, 0x95, 0x2f, 0x66, 0x2f, 0x4e, 0x10, 0xc3, - 0xd6, 0x74, 0x06, 0x34, 0x05, 0x3e, 0x3c, 0x5a, 0x76, 0xc5, 0xd1, 0xa2, 0xe2, 0x72, 0xe9, 0xe2, - 0x0e, 0x40, 0xe5, 0xd4, 0xf1, 0x09, 0x2a, 0x40, 0xfe, 0x5d, 0xb7, 0x6d, 0x88, 0x47, 0xd7, 0xd1, - 0x1b, 0x1f, 0xe4, 0xe4, 0x9c, 0x9e, 0xf0, 0x29, 0xca, 0xe1, 0x67, 0x80, 0x0e, 0x09, 0x6d, 0xb9, - 0xc3, 0xc5, 0x8c, 0xe9, 0x5a, 0xd1, 0xba, 0x26, 0xdc, 0x8f, 0xa1, 0xe4, 0xb9, 0xbf, 0x84, 0xb5, - 0x0b, 0x9b, 0x4c, 0x47, 0xbe, 0xec, 0xdc, 0x56, 0x4d, 0x18, 0x60, 0x2d, 0x30, 0xc0, 0x5a, 0xc3, - 0x59, 0x9a, 0x12, 0x83, 0xfb, 0xb0, 0xd5, 0x66, 0xda, 0xef, 0xd8, 0x2c, 0xc2, 0x9a, 0xfd, 0x17, - 0xac, 0x5f, 0xc1, 0x76, 0x82, 0x55, 0x8a, 0xdb, 0x02, 0x75, 0xe8, 0x2e, 0xe4, 0xa5, 0xa8, 0xa6, - 0x58, 0xe0, 0xe7, 0xb0, 0xdd, 0x22, 0x53, 0x42, 0xc9, 0x5d, 0x47, 0xae, 0xc1, 0x4e, 0x12, 0x98, - 0x4a, 0xdc, 0x81, 0x62, 0x8f, 0x58, 0x73, 0x76, 0xd3, 0x82, 0xf0, 0x5b, 0x28, 0xf9, 0x3c, 0x70, - 0x3e, 0x17, 0x91, 0xd4, 0x26, 0x15, 0xfd, 0x68, 0x31, 0x3e, 0x86, 0x52, 0xc0, 0x26, 0x77, 0x7d, - 0x05, 0xc5, 0x90, 0xce, 0x5f, 0x4c, 0xd3, 0xd9, 0x36, 0x03, 0x36, 0x86, 0xc4, 0xbf, 0x2b, 0xa0, - 0xf1, 0x1e, 
0x35, 0x5d, 0xe7, 0xc2, 0x1e, 0x33, 0x2a, 0x3e, 0x44, 0xe7, 0x33, 0xcb, 0xf3, 0x6c, - 0x67, 0x9c, 0x4e, 0xc5, 0xa1, 0xef, 0x05, 0x12, 0xed, 0x02, 0x88, 0x52, 0xba, 0xf4, 0x88, 0xb4, - 0xc7, 0x0d, 0x1e, 0xe9, 0x2f, 0x3d, 0x36, 0x10, 0x48, 0xa4, 0x7d, 0xea, 0xce, 0xad, 0x31, 0x11, - 0xb0, 0x1c, 0x87, 0x95, 0x79, 0xa6, 0x27, 0x12, 0x0c, 0x8d, 0xbb, 0xb0, 0x73, 0x48, 0x68, 0x44, - 0x59, 0x78, 0xd8, 0x03, 0x10, 0xdb, 0x9e, 0x0f, 0x79, 0x5c, 0x0a, 0x44, 0x72, 0xf6, 0xa3, 0x15, - 0x9a, 0x7d, 0xbd, 0xc0, 0x06, 0x6c, 0x07, 0x84, 0xcc, 0x4d, 0xfd, 0x08, 0x9f, 0x16, 0xe8, 0xb2, - 0x68, 0xfa, 0xb4, 0x82, 0x1d, 0x96, 0xe3, 0x23, 0x28, 0x04, 0xb7, 0xff, 0xdf, 0xa6, 0x74, 0xff, - 0x8f, 0x75, 0x50, 0xb9, 0x2e, 0x64, 0xc2, 0xbd, 0xc4, 0x97, 0x14, 0xed, 0xa6, 0x7e, 0xef, 0x2b, - 0xff, 0x4f, 0xff, 0x00, 0xe3, 0x0c, 0xfa, 0x1e, 0x0a, 0x81, 0xd9, 0xa2, 0x9d, 0x1b, 0x3a, 0x74, - 0xf6, 0x47, 0x48, 0xe5, 0x41, 0x84, 0x25, 0xea, 0x7f, 0x38, 0x83, 0xde, 0x80, 0x16, 0xb1, 0x5d, - 0x94, 0xf8, 0x46, 0x44, 0xac, 0xb8, 0xb2, 0x82, 0x1c, 0x67, 0x50, 0x0b, 0x36, 0xa3, 0x1e, 0x8c, - 0x2a, 0x71, 0x92, 0xa8, 0x31, 0xa7, 0xb0, 0x34, 0x43, 0x25, 0xa9, 0x67, 0x49, 0x90, 0x27, 0x8e, - 0x73, 0x18, 0x4a, 0xe1, 0xce, 0xbb, 0x92, 0xe5, 0x71, 0x8a, 0x4d, 0xe3, 0xcc, 0xd7, 0x0a, 0x7a, - 0x0b, 0x5a, 0xc4, 0xf5, 0xc2, 0xbe, 0xdc, 0xf4, 0xcb, 0x50, 0xd0, 0x2d, 0x26, 0x89, 0x33, 0xc8, - 0x80, 0x62, 0xcc, 0xa2, 0xd0, 0xe3, 0xe8, 0x20, 0x27, 0xb9, 0x9e, 0xdc, 0x9e, 0x0c, 0xd8, 0xaa, - 0x0a, 0xfa, 0x11, 0x4a, 0x71, 0x6b, 0x42, 0x41, 0xcd, 0xad, 0xd6, 0x56, 0xd9, 0x5d, 0x91, 0x8d, - 0x50, 0xbe, 0x84, 0x35, 0xe1, 0x37, 0x68, 0x4b, 0x82, 0x63, 0x66, 0x56, 0xd9, 0x4e, 0x44, 0xc3, - 0xb3, 0xb5, 0xa1, 0x14, 0x7f, 0xc3, 0x2b, 0xdb, 0xbd, 0x7b, 0xdd, 0xa3, 0x5b, 0x9e, 0x3c, 0xbf, - 0xb7, 0x62, 0xec, 0xf5, 0xae, 0x64, 0x7a, 0x92, 0x60, 0x8a, 0xbd, 0x75, 0x9c, 0x41, 0xdf, 0x41, - 0xa1, 0xe7, 0x58, 0x9e, 0x3f, 0x71, 0xe9, 0x4a, 0x8e, 0x95, 0x33, 0xf8, 0xa6, 0xfa, 0xd3, 0xe7, - 0x63, 0x9b, 0x4e, 0x16, 0x83, 0xda, 0xd0, 0x9d, 
0xd5, 0x67, 0xae, 0xbf, 0xb8, 0xb4, 0xea, 0x83, - 0xa9, 0xe5, 0xd3, 0x7a, 0xfc, 0x9f, 0x83, 0xc1, 0x1a, 0x5f, 0xbf, 0xf8, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xbf, 0x2f, 0x50, 0x92, 0x35, 0x0c, 0x00, 0x00, + // 1137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x5d, 0x73, 0xda, 0x46, + 0x14, 0x45, 0x80, 0x6c, 0x7c, 0x65, 0x08, 0xdd, 0xd8, 0x4e, 0x42, 0xe2, 0x36, 0xd9, 0xa6, 0x0d, + 0x33, 0x49, 0xa1, 0xe3, 0x8c, 0x27, 0x4d, 0xda, 0x4e, 0x07, 0x83, 0x62, 0x13, 0x13, 0xe1, 0x0a, + 0x3b, 0x9e, 0xf4, 0xc5, 0x23, 0xd0, 0x1a, 0x34, 0x06, 0x49, 0x45, 0x8b, 0xa7, 0x3c, 0xf6, 0xb5, + 0xef, 0xfd, 0x0f, 0xed, 0xcf, 0xe9, 0x7b, 0x7f, 0x4c, 0x67, 0x3f, 0x24, 0x4b, 0x32, 0x90, 0xe9, + 0x8b, 0xc7, 0x7b, 0xef, 0xb9, 0x67, 0xcf, 0xbd, 0xbb, 0x7b, 0x64, 0x43, 0xc5, 0x9f, 0x7a, 0xd4, + 0xeb, 0xcf, 0x2e, 0xeb, 0x8e, 0x6b, 0x93, 0xdf, 0xc4, 0xcf, 0x1a, 0x0f, 0x22, 0x95, 0x2f, 0x2a, + 0x0f, 0x86, 0x9e, 0x37, 0x1c, 0x93, 0x7a, 0x84, 0xb4, 0xdc, 0xb9, 0x40, 0x54, 0x1e, 0xa6, 0x53, + 0x64, 0xe2, 0x53, 0x99, 0xc4, 0x7f, 0x28, 0xb0, 0x63, 0x78, 0x36, 0x39, 0x22, 0xd6, 0x98, 0x8e, + 0x9a, 0x23, 0x32, 0xb8, 0x32, 0xc9, 0xaf, 0x33, 0x12, 0x50, 0xf4, 0x1a, 0x54, 0x7f, 0xea, 0xf5, + 0xc9, 0x7d, 0xe5, 0xb1, 0x52, 0x2d, 0xed, 0x7d, 0x59, 0x13, 0xdb, 0x2e, 0x46, 0xd7, 0x4e, 0x18, + 0xd4, 0x14, 0x15, 0x78, 0x1f, 0x54, 0xbe, 0x46, 0x77, 0x40, 0x3b, 0xd2, 0x1b, 0x9d, 0xd3, 0xa3, + 0xb6, 0xa1, 0xf7, 0x7a, 0xe5, 0x0c, 0xda, 0x84, 0x42, 0xa7, 0xfd, 0x41, 0xe7, 0x2b, 0x05, 0x15, + 0x61, 0xc3, 0xd4, 0x1b, 0x2d, 0x91, 0xcc, 0xe2, 0xbf, 0x15, 0xb8, 0x77, 0x8b, 0x3e, 0xf0, 0x3d, + 0x37, 0x20, 0xe8, 0x0d, 0xa8, 0x01, 0xb5, 0x68, 0xa8, 0xe6, 0xe9, 0x32, 0x35, 0x02, 0x5e, 0xeb, + 0x31, 0xac, 0x29, 0x4a, 0xb0, 0x09, 0x2a, 0x5f, 0x23, 0x0d, 0xd6, 0x85, 0x9c, 0x8f, 0xe5, 0x0c, + 0xdb, 0xfc, 0xcc, 0x08, 0x97, 0x0a, 0xda, 0x00, 0xb5, 0xc1, 0xa4, 0x95, 0xb3, 0xa8, 0x00, 0xf9, + 0x96, 0xde, 0x68, 0x95, 0x73, 0x2c, 0xc8, 0x04, 0x7e, 0x2c, 0xe7, 0x19, 0xdc, 
0xe8, 0x9e, 0x5e, + 0x88, 0xa5, 0x8a, 0x4f, 0xa0, 0xf0, 0x9e, 0x50, 0xcb, 0xb6, 0xa8, 0x85, 0x9e, 0xc0, 0xe6, 0x70, + 0xea, 0x0f, 0x2e, 0x2c, 0xdb, 0x9e, 0x92, 0x20, 0xe0, 0x12, 0x37, 0x4c, 0x8d, 0xc5, 0x1a, 0x22, + 0xc4, 0x20, 0x23, 0x4a, 0xfd, 0x08, 0x92, 0x15, 0x10, 0x16, 0x93, 0x10, 0xfc, 0xaf, 0x02, 0x79, + 0xd6, 0x0e, 0x2a, 0x41, 0xd6, 0xb1, 0x25, 0x49, 0xd6, 0xb1, 0x59, 0x6d, 0xdf, 0x71, 0xed, 0x74, + 0x2d, 0x8b, 0x85, 0xf4, 0xcf, 0xc2, 0xe9, 0xe4, 0xf8, 0x74, 0x3e, 0x8b, 0x4d, 0x27, 0x31, 0x0a, + 0xf4, 0x1c, 0x0a, 0x13, 0x29, 0xfb, 0x7e, 0xfe, 0xb1, 0x52, 0xd5, 0xf6, 0xee, 0x48, 0x6c, 0xd8, + 0x8d, 0x19, 0x01, 0xf0, 0x71, 0x6c, 0x6e, 0x67, 0xc6, 0xb1, 0xd1, 0x3d, 0x37, 0xc4, 0x11, 0xbe, + 0xed, 0x76, 0x3a, 0xdd, 0x73, 0xdd, 0x14, 0x47, 0xd8, 0x6c, 0x18, 0xad, 0x76, 0xab, 0x71, 0xca, + 0x46, 0x07, 0xb0, 0xd6, 0xd1, 0x1b, 0x2d, 0xdd, 0x2c, 0xe7, 0x18, 0xb0, 0x77, 0x74, 0x76, 0xda, + 0x62, 0x65, 0x79, 0xfc, 0xbb, 0x02, 0xeb, 0xcd, 0xf1, 0x2c, 0xa0, 0x64, 0x8a, 0xea, 0xa0, 0xba, + 0x9e, 0x4d, 0xd8, 0xa4, 0x72, 0x55, 0x6d, 0xef, 0x81, 0x94, 0x20, 0xd3, 0x5c, 0x76, 0xa0, 0xbb, + 0x74, 0x3a, 0x37, 0x05, 0xae, 0xa2, 0x03, 0xdc, 0x04, 0x51, 0x19, 0x72, 0x57, 0x64, 0x2e, 0x27, + 0xc4, 0x7e, 0x45, 0x4f, 0x40, 0xbd, 0xb6, 0xc6, 0x33, 0xc2, 0x67, 0xa3, 0xed, 0x69, 0xb1, 0xfe, + 0x4d, 0x91, 0x79, 0x93, 0xfd, 0x4e, 0xc1, 0x2f, 0xa1, 0xcc, 0x42, 0x6d, 0xf7, 0xd2, 0x8b, 0x2e, + 0xd6, 0x17, 0x90, 0x67, 0x7b, 0x70, 0xb6, 0x54, 0x25, 0x4f, 0xe0, 0x7d, 0x40, 0x52, 0xd8, 0x3b, + 0xcf, 0x71, 0xc3, 0xd7, 0xf1, 0xc9, 0xb2, 0xaf, 0xe0, 0xae, 0x2c, 0xeb, 0x10, 0xeb, 0x9a, 0x84, + 0x75, 0xa9, 0xc3, 0xc5, 0x3f, 0x45, 0xb0, 0x84, 0xaa, 0x2a, 0xac, 0x0f, 0x44, 0x58, 0xee, 0x50, + 0x4a, 0xce, 0xc8, 0x0c, 0xd3, 0xf8, 0x1f, 0x05, 0xb6, 0x64, 0xf0, 0xdc, 0xa2, 0x83, 0x51, 0x44, + 0xf1, 0x0a, 0x54, 0x72, 0x4d, 0x5c, 0x2a, 0x5f, 0xcc, 0x93, 0x24, 0x41, 0x02, 0x5b, 0xd3, 0x19, + 0xd0, 0x14, 0xf8, 0xa8, 0xb5, 0xec, 0x92, 0xd6, 0xe2, 0xe2, 0x72, 0xab, 0xc5, 0xed, 0x83, 0xca, + 0xa9, 0x93, 0x37, 
0xa8, 0x00, 0xf9, 0x77, 0xdd, 0xb6, 0x21, 0x1e, 0x5d, 0x47, 0x6f, 0x7c, 0x90, + 0x37, 0xe7, 0xec, 0x84, 0xdf, 0xa2, 0x1c, 0x3e, 0x82, 0x42, 0xcb, 0x1b, 0xcc, 0x26, 0xac, 0x32, + 0xfd, 0x1a, 0x5e, 0xc0, 0xda, 0xa5, 0x43, 0xc6, 0x76, 0x20, 0xf5, 0x6d, 0xd5, 0x84, 0xbf, 0xd5, + 0x42, 0x7f, 0xab, 0x35, 0xdc, 0xb9, 0x29, 0x31, 0xf8, 0x29, 0xa0, 0x43, 0x42, 0x43, 0xb2, 0x65, + 0x87, 0x70, 0x00, 0x77, 0x13, 0x28, 0x39, 0xc1, 0xe7, 0x50, 0xb0, 0x65, 0x4c, 0x9e, 0x42, 0xf8, + 0x58, 0x22, 0x68, 0x04, 0xc0, 0x4d, 0xd8, 0x6a, 0xb3, 0x5c, 0x7a, 0xaf, 0xff, 0x45, 0xf2, 0x0d, + 0x6c, 0xa7, 0x48, 0xa4, 0x94, 0x2d, 0x50, 0x07, 0xde, 0x4c, 0x52, 0xa8, 0xa6, 0x58, 0xe0, 0x67, + 0xb0, 0xdd, 0x22, 0x63, 0x42, 0xc9, 0xa7, 0x1a, 0xac, 0xc1, 0x4e, 0x1a, 0xb8, 0x92, 0xb8, 0x03, + 0xc5, 0x1e, 0xb1, 0xa6, 0xec, 0x86, 0x08, 0xc2, 0xef, 0xa1, 0x14, 0xf0, 0xc0, 0xc5, 0x54, 0x44, + 0x64, 0x2f, 0x8b, 0xa7, 0x5f, 0x0c, 0xe2, 0xc5, 0xf8, 0x18, 0x4a, 0x21, 0x9b, 0xdc, 0xf5, 0x35, + 0x14, 0x23, 0xba, 0x60, 0x36, 0x5e, 0xcd, 0xb6, 0x19, 0xb2, 0x31, 0x24, 0xfe, 0x53, 0x01, 0x8d, + 0xcf, 0xa8, 0xe9, 0xb9, 0x97, 0xce, 0x90, 0x51, 0xf1, 0x71, 0x5e, 0x4c, 0x2c, 0xdf, 0x77, 0xdc, + 0xe1, 0x6a, 0x2a, 0x0e, 0x7d, 0x2f, 0x90, 0x68, 0x17, 0x40, 0x94, 0xd2, 0xb9, 0x4f, 0xa4, 0xad, + 0x6e, 0xf0, 0xc8, 0xe9, 0xdc, 0x27, 0xe8, 0x05, 0x20, 0x91, 0x0e, 0xa8, 0x37, 0xb5, 0x86, 0x44, + 0xc0, 0x72, 0x1c, 0x56, 0xe6, 0x99, 0x9e, 0x48, 0x30, 0x34, 0xee, 0xc2, 0xce, 0x21, 0xa1, 0x31, + 0x65, 0x51, 0xb3, 0xfb, 0x20, 0xb6, 0xbd, 0x18, 0xf0, 0xb8, 0x14, 0x88, 0xe4, 0x2d, 0x88, 0x57, + 0x68, 0xce, 0xcd, 0x02, 0x1b, 0xb0, 0x1d, 0x12, 0x32, 0x17, 0x0e, 0x62, 0x7c, 0x5a, 0xa8, 0xcb, + 0xa2, 0xc1, 0xca, 0x7e, 0xc1, 0x89, 0xca, 0xf7, 0xfe, 0x5a, 0x07, 0x95, 0xb3, 0x21, 0x13, 0xee, + 0xa4, 0xbe, 0x9b, 0x68, 0x77, 0xe5, 0xd7, 0xbd, 0xf2, 0xf9, 0xea, 0xcf, 0x2d, 0xce, 0xa0, 0x1f, + 0xa1, 0x10, 0x5a, 0x2b, 0xda, 0xb9, 0xa5, 0x45, 0x67, 0x7f, 0x72, 0x54, 0xee, 0xc5, 0x58, 0xe2, + 0x6e, 0x87, 0x33, 0xe8, 0x00, 0xb4, 0x98, 0xc9, 0xa2, 
0xd4, 0x17, 0x21, 0x66, 0xbc, 0x95, 0x25, + 0xe4, 0x38, 0x83, 0x5a, 0xb0, 0x19, 0x77, 0x5c, 0x54, 0x49, 0x92, 0xc4, 0x6d, 0x78, 0x05, 0x4b, + 0x33, 0x52, 0xb2, 0xb2, 0x97, 0x14, 0x79, 0xaa, 0x9d, 0xc3, 0x48, 0x0a, 0xf7, 0xd9, 0xa5, 0x2c, + 0x0f, 0x57, 0x98, 0x32, 0xce, 0x7c, 0xab, 0xa0, 0xb7, 0xa0, 0xc5, 0x9c, 0x29, 0x9a, 0xcb, 0x6d, + 0x4f, 0x8b, 0x04, 0x2d, 0x30, 0x32, 0x9c, 0x41, 0x06, 0x14, 0x13, 0xc6, 0x82, 0x1e, 0xc6, 0xaf, + 0x5f, 0x9a, 0xeb, 0xd1, 0xe2, 0x64, 0xc8, 0x56, 0x55, 0xd0, 0xcf, 0x50, 0x4a, 0x1a, 0x0a, 0x0a, + 0x6b, 0x16, 0x1a, 0x52, 0x65, 0x77, 0x49, 0x36, 0x46, 0xf9, 0x0a, 0xd6, 0x84, 0x4b, 0xa0, 0x2d, + 0x09, 0x4e, 0x58, 0x50, 0x65, 0x3b, 0x15, 0x8d, 0x7a, 0x6b, 0x43, 0x29, 0xf9, 0xf2, 0x96, 0x8e, + 0x7b, 0xf7, 0x66, 0x46, 0x0b, 0x1e, 0x2a, 0x3f, 0xb7, 0x62, 0xe2, 0xcd, 0x2d, 0x65, 0x7a, 0x94, + 0x62, 0x4a, 0xbc, 0x50, 0x9c, 0x41, 0x3f, 0x40, 0xa1, 0xe7, 0x5a, 0x7e, 0x30, 0xf2, 0xe8, 0x52, + 0x8e, 0xa5, 0x77, 0xf0, 0xa0, 0xfa, 0xcb, 0xd7, 0x43, 0x87, 0x8e, 0x66, 0xfd, 0xda, 0xc0, 0x9b, + 0xd4, 0x27, 0x5e, 0x30, 0xbb, 0xb2, 0xea, 0xfd, 0xb1, 0x15, 0xd0, 0x7a, 0xf2, 0x5f, 0x81, 0xfe, + 0x1a, 0x5f, 0xbf, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x14, 0x4f, 0xc0, 0x27, 0x23, 0x0c, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index 3edab24..0943a3b 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -111,17 +111,21 @@ message ClusterWatchResponse { Cluster cluster = 3; } +message Document { + string id = 1; + google.protobuf.Any fields = 2; +} + message GetDocumentRequest { string id = 1; } message GetDocumentResponse { - google.protobuf.Any fields = 1; + Document document = 1; } message IndexDocumentRequest { - string id = 1; - google.protobuf.Any fields = 2; + Document document = 1; } message IndexDocumentResponse { @@ -158,8 +162,3 @@ message GetIndexStatsResponse { google.protobuf.Any index_stats = 1; } -// use for creating snapshot -message Document { - string id = 1; - google.protobuf.Any fields = 2; -} diff --git a/testutils/testutils.go b/testutils/testutils.go index 758e804..9e0ec2c 100644 --- a/testutils/testutils.go +++ b/testutils/testutils.go @@ -17,9 +17,6 @@ package testutils import ( "io/ioutil" "net" - - "github.com/mosuka/blast/config" - "github.com/mosuka/blast/indexutils" ) func TmpDir() string { @@ -44,29 +41,3 @@ func TmpPort() int { return l.Addr().(*net.TCPAddr).Port } - -//func TmpNodeConfig() *config.NodeConfig { -// c := config.DefaultNodeConfig() -// -// c.BindAddr = fmt.Sprintf(":%d", TmpPort()) -// c.GRPCAddr = fmt.Sprintf(":%d", TmpPort()) -// c.HTTPAddr = fmt.Sprintf(":%d", TmpPort()) -// c.DataDir = TmpDir() -// -// return c -//} - -func TmpIndexConfig(indexMappingFile string, indexType string, indexStorageType string) (*config.IndexConfig, error) { - indexMapping, err := indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return config.DefaultIndexConfig(), err - } - - indexConfig := &config.IndexConfig{ - IndexMapping: indexMapping, - IndexType: indexType, - IndexStorageType: indexStorageType, - } - - return indexConfig, nil -} From de4520ba6715087daf947285b69db71ba61fab4c Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 
19:50:27 +0900 Subject: [PATCH 27/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 5141ff9..6fc49a6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -24,6 +24,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change protobuf for indexer and dispatcher #95 - Change server arguments #96 - Change index protobuf #97 +- Use protobuf document #98 ## [v0.7.1] - 2019-07-18 From 6342af0c907601885f2ebeeeb131f1cf2f284248 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 21:24:06 +0900 Subject: [PATCH 28/76] Change node state to Node_SHUTDOWN in a error (#99) --- indexer/grpc_service.go | 2 +- manager/grpc_service.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 2161fda..8f6598e 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -594,7 +594,7 @@ func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { s.logger.Debug(err.Error(), zap.String("id", id)) return &index.Node{ BindAddress: "", - State: index.Node_UNKNOWN, + State: index.Node_SHUTDOWN, Metadata: &index.Metadata{ GrpcAddress: "", HttpAddress: "", diff --git a/manager/grpc_service.go b/manager/grpc_service.go index 6bccc35..c0745ae 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -330,7 +330,7 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { s.logger.Debug(err.Error(), zap.String("id", id)) return &management.Node{ BindAddress: "", - State: management.Node_UNKNOWN, + State: management.Node_SHUTDOWN, Metadata: &management.Metadata{ GrpcAddress: "", HttpAddress: "", From 7d2b2c2fe25f4f4540740c272a4f5c262516a9ee Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 21:24:35 +0900 Subject: [PATCH 29/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 6fc49a6..f04569d 100644 --- 
a/CHANGES.md +++ b/CHANGES.md @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Change server arguments #96 - Change index protobuf #97 - Use protobuf document #98 +- Change node state to Node_SHUTDOWN in a error #99 ## [v0.7.1] - 2019-07-18 From e468a1ec43d4e84610555783d66aadbe299dd41a Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 23:50:00 +0900 Subject: [PATCH 30/76] Fix a bug for waiting to receive an indexer cluster updates from the stream (#100) --- dispatcher/grpc_service.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index 309b6d6..e49e63d 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -94,12 +94,12 @@ func (s *GRPCService) Start() error { } func (s *GRPCService) Stop() error { - s.logger.Info("stop to update indexer cluster info") - s.stopUpdateIndexers() - s.logger.Info("stop to update manager cluster info") s.stopUpdateManagers() + s.logger.Info("stop to update indexer cluster info") + s.stopUpdateIndexers() + return nil } From 4eee470c0f5da871da4feb002b31328b3d3f13fd Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 6 Aug 2019 23:50:32 +0900 Subject: [PATCH 31/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index f04569d..da3402e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -26,6 +26,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Change index protobuf #97 - Use protobuf document #98 - Change node state to Node_SHUTDOWN in a error #99 +- Fix a bug for waiting to receive an indexer cluster updates from the stream #100 ## [v0.7.1] - 2019-07-18 From 83fad97645bb229ea5cc89ca094add0c238765fc Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 7 Aug 2019 13:42:02 +0900 Subject: [PATCH 32/76] Add test (#101) --- dispatcher/server_test.go | 85 +++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 39 deletions(-) diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 5500ed2..28ed3f4 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,28 +22,28 @@ import ( "testing" "time" + "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" ) func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) + logger := logutils.NewLogger("INFO", "", 500, 3, 30, false) grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) managerPeerGrpcAddress1 := "" managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerNodeId1 := "manager1" managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerDataDir1 := testutils.TmpDir() managerRaftStorageType1 := "boltdb" @@ -66,7 +66,7 @@ func TestServer_Start(t *testing.T) { managerIndexStorageType1 := "boltdb" // create server - managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, 
managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexMapping1, managerIndexType1, managerIndexStorageType1, logger, grpcLogger, httpAccessLogger) + managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexMapping1, managerIndexType1, managerIndexStorageType1, logger.Named(managerNodeId1), grpcLogger.Named(managerNodeId1), httpAccessLogger) defer func() { if managerServer1 != nil { managerServer1.Stop() @@ -79,10 +79,13 @@ func TestServer_Start(t *testing.T) { // start server managerServer1.Start() + // sleep + time.Sleep(5 * time.Second) + managerPeerGrpcAddress2 := managerGrpcAddress1 managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerNodeId2 := "manager2" managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) managerDataDir2 := testutils.TmpDir() managerRaftStorageType2 := "boltdb" @@ -105,7 +108,7 @@ func TestServer_Start(t *testing.T) { managerIndexStorageType2 := "boltdb" // create server - managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexMapping2, managerIndexType2, managerIndexStorageType2, logger, grpcLogger, httpAccessLogger) + managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexMapping2, managerIndexType2, managerIndexStorageType2, logger.Named(managerNodeId2), grpcLogger.Named(managerNodeId2), httpAccessLogger) defer func() { if managerServer2 != nil { managerServer2.Stop() @@ -118,10 +121,13 @@ func TestServer_Start(t *testing.T) { // start server managerServer2.Start() + // sleep + time.Sleep(5 * time.Second) + managerPeerGrpcAddress3 := managerGrpcAddress1 managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress3 := 
fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + managerNodeId3 := "manager3" managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerDataDir3 := testutils.TmpDir() managerRaftStorageType3 := "boltdb" @@ -144,7 +150,7 @@ func TestServer_Start(t *testing.T) { managerIndexStorageType3 := "boltdb" // create server - managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexMapping3, managerIndexType3, managerIndexStorageType3, logger, grpcLogger, httpAccessLogger) + managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexMapping3, managerIndexType3, managerIndexStorageType3, logger.Named(managerNodeId3), grpcLogger.Named(managerNodeId3), httpAccessLogger) defer func() { if managerServer3 != nil { managerServer3.Stop() @@ -217,7 +223,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress1 := "" indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerNodeId1 := "indexer1" indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir1 := testutils.TmpDir() defer func() { @@ -240,7 +246,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType1 := "upside_down" indexerIndexStorageType1 := "boltdb" - indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexerIndexMapping1, indexerIndexType1, indexerIndexStorageType1, logger, grpcLogger, httpAccessLogger) + indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexerIndexMapping1, indexerIndexType1, 
indexerIndexStorageType1, logger.Named(indexerNodeId1), grpcLogger.Named(indexerNodeId1), httpAccessLogger) defer func() { indexerServer1.Stop() }() @@ -257,7 +263,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress2 := "" indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerNodeId2 := "indexer2" indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir2 := testutils.TmpDir() defer func() { @@ -280,7 +286,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType2 := "upside_down" indexerIndexStorageType2 := "boltdb" - indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexerIndexMapping2, indexerIndexType2, indexerIndexStorageType2, logger, grpcLogger, httpAccessLogger) + indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexerIndexMapping2, indexerIndexType2, indexerIndexStorageType2, logger.Named(indexerNodeId2), grpcLogger.Named(indexerNodeId2), httpAccessLogger) defer func() { indexerServer2.Stop() }() @@ -297,7 +303,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress3 := "" indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerNodeId3 := "indexer3" indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir3 := testutils.TmpDir() defer func() { @@ -320,7 +326,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType3 := "upside_down" indexerIndexStorageType3 := "boltdb" - indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, 
indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexerIndexMapping3, indexerIndexType3, indexerIndexStorageType3, logger, grpcLogger, httpAccessLogger) + indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexerIndexMapping3, indexerIndexType3, indexerIndexStorageType3, logger.Named(indexerNodeId3), grpcLogger.Named(indexerNodeId3), httpAccessLogger) defer func() { indexerServer3.Stop() }() @@ -389,7 +395,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress4 := "" indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId4 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerNodeId4 := "indexer4" indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir4 := testutils.TmpDir() defer func() { @@ -412,7 +418,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType4 := "upside_down" indexerIndexStorageType4 := "boltdb" - indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexerIndexMapping4, indexerIndexType4, indexerIndexStorageType4, logger, grpcLogger, httpAccessLogger) + indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexerIndexMapping4, indexerIndexType4, indexerIndexStorageType4, logger.Named(indexerNodeId4), grpcLogger.Named(indexerNodeId4), httpAccessLogger) defer func() { indexerServer4.Stop() }() @@ -429,7 +435,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress5 := "" indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId5 := fmt.Sprintf("node-%s", 
strutils.RandStr(5)) + indexerNodeId5 := "indexer5" indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir5 := testutils.TmpDir() defer func() { @@ -452,7 +458,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType5 := "upside_down" indexerIndexStorageType5 := "boltdb" - indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexerIndexMapping5, indexerIndexType5, indexerIndexStorageType5, logger, grpcLogger, httpAccessLogger) + indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexerIndexMapping5, indexerIndexType5, indexerIndexStorageType5, logger.Named(indexerNodeId5), grpcLogger.Named(indexerNodeId5), httpAccessLogger) defer func() { indexerServer5.Stop() }() @@ -469,7 +475,7 @@ func TestServer_Start(t *testing.T) { indexerPeerGrpcAddress6 := "" indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId6 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerNodeId6 := "indexer6" indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerDataDir6 := testutils.TmpDir() defer func() { @@ -492,7 +498,7 @@ func TestServer_Start(t *testing.T) { } indexerIndexType6 := "upside_down" indexerIndexStorageType6 := "boltdb" - indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexerIndexMapping6, indexerIndexType6, indexerIndexStorageType6, logger, grpcLogger, httpAccessLogger) + indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexerIndexMapping6, indexerIndexType6, indexerIndexStorageType6, 
logger.Named(indexerNodeId6), grpcLogger.Named(indexerNodeId6), httpAccessLogger) defer func() { indexerServer6.Stop() }() @@ -553,23 +559,24 @@ func TestServer_Start(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } - //// - //// dispatcher - //// - //dispatcherManagerGrpcAddress := managerGrpcAddress1 - //dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - //dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) // - //dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) - //defer func() { - // dispatcher1.Stop() - //}() - //if err != nil { - // t.Fatalf("%v", err) - //} - //// start server - //dispatcher1.Start() + // dispatcher // - //// sleep - //time.Sleep(5 * time.Second) + dispatcherManagerGrpcAddress := managerGrpcAddress1 + dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + + dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) + defer func() { + dispatcher1.Stop() + }() + if err != nil { + t.Fatalf("%v", err) + } + + // start server + dispatcher1.Start() + + // sleep + time.Sleep(5 * time.Second) } From 02cc35462c6fbd6a1fc21d324ac89d41979f7a17 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 8 Aug 2019 17:01:27 +0900 Subject: [PATCH 33/76] Add prefix search example --- example/wiki_search_request_prefix.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 example/wiki_search_request_prefix.json diff --git a/example/wiki_search_request_prefix.json b/example/wiki_search_request_prefix.json new file mode 100644 index 0000000..adb5f92 --- /dev/null +++ b/example/wiki_search_request_prefix.json @@ -0,0 +1,14 
@@ +{ + "query": { + "prefix": "searc", + "field": "title_en" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] +} From c4077b7833c06b51a6c18e896a3bb8a8118f0666 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 8 Aug 2019 17:48:24 +0900 Subject: [PATCH 34/76] Update GEO search request example --- example/geo_search_request.json | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/example/geo_search_request.json b/example/geo_search_request.json index f49261b..40baa91 100644 --- a/example/geo_search_request.json +++ b/example/geo_search_request.json @@ -22,34 +22,5 @@ "lat": 37.399285 } } - ], - "facets": { - "State count": { - "size": 10, - "field": "state" - }, - "Updated range": { - "size": 10, - "field": "updated", - "date_ranges": [ - { - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z", - "end": "2010-12-31T23:59:59Z" - }, - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z" - } - ] - } - }, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - } + ] } From 00ff94fb21e4dc9f3c10ecf4854c6015f8ae00bb Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 27 Aug 2019 09:22:38 +0900 Subject: [PATCH 35/76] Migrate to grpc-gateway (#105) * Migrate to grpc-gateway * Add HTTP server * Change REST API parameter * Migrate to grpc-gateway * Update REST API * Change protobuf * Change index protobuf * Add delete document endpoint * Add bulk delete endpoint * Add search endpoint * Migrate to grpc-gateway * Update protobuf * Refactoring * Refactoring * Add swagger.json * Generate protobuf and swagger.json * Revert swagger.json * Update Dockerfile * Update wkipedia example * Update CHANGES.md --- CHANGES.md | 1 + Dockerfile | 2 +- Makefile | 7 +- README.md | 729 ++++++++++------ cmd/blast/dispatcher_delete.go | 121 +-- cmd/blast/dispatcher_get.go | 13 +- cmd/blast/dispatcher_index.go | 232 +++-- 
cmd/blast/dispatcher_node_health.go | 33 +- cmd/blast/dispatcher_search.go | 48 +- cmd/blast/dispatcher_start.go | 3 +- cmd/blast/indexer_cluster_info.go | 11 +- cmd/blast/indexer_cluster_leave.go | 15 +- cmd/blast/indexer_cluster_watch.go | 22 +- cmd/blast/indexer_delete.go | 121 +-- cmd/blast/indexer_get.go | 11 +- cmd/blast/indexer_index.go | 238 ++++-- cmd/blast/indexer_node_health.go | 30 +- cmd/blast/indexer_node_info.go | 10 +- cmd/blast/indexer_search.go | 48 +- cmd/blast/indexer_snapshot.go | 13 +- cmd/blast/indexer_start.go | 6 +- cmd/blast/main.go | 18 + cmd/blast/manager_cluster_info.go | 10 +- cmd/blast/manager_cluster_leave.go | 14 +- cmd/blast/manager_cluster_watch.go | 21 +- cmd/blast/manager_delete.go | 14 +- cmd/blast/manager_get.go | 14 +- cmd/blast/manager_node_health.go | 30 +- cmd/blast/manager_node_info.go | 10 +- cmd/blast/manager_set.go | 24 +- cmd/blast/manager_snapshot.go | 12 +- cmd/blast/manager_start.go | 6 +- cmd/blast/manager_watch.go | 35 +- dispatcher/grpc_client.go | 145 +--- dispatcher/grpc_gateway.go | 353 ++++++++ dispatcher/grpc_service.go | 463 ++++++---- dispatcher/http_handler.go | 568 +------------ dispatcher/server.go | 27 +- dispatcher/server_test.go | 134 +-- example/geo_search_request.json | 42 +- example/wiki_bulk_delete.txt | 32 + example/wiki_search_request.json | 78 +- example/wiki_search_request_prefix.json | 22 +- example/wiki_search_request_simple.json | 24 +- go.mod | 22 +- go.sum | 127 ++- indexer/grpc_client.go | 255 +----- indexer/grpc_gateway.go | 376 ++++++++ indexer/grpc_service.go | 233 ++--- indexer/http_handler.go | 570 +------------ indexer/raft_command.go | 43 - indexer/raft_fsm.go | 68 +- indexer/raft_server.go | 165 ++-- indexer/server.go | 112 ++- indexer/server_test.go | 612 +++++++------ manager/grpc_client.go | 174 +--- manager/grpc_gateway.go | 172 ++++ manager/grpc_server.go | 36 +- manager/grpc_service.go | 61 +- manager/http_handler.go | 79 ++ manager/http_router.go | 296 ------- 
manager/http_server.go | 16 - manager/raft_command.go | 43 - manager/raft_fsm.go | 63 +- manager/raft_fsm_test.go | 55 +- manager/raft_server.go | 78 +- manager/server.go | 33 +- manager/server_test.go | 912 +++++++++++++------- maputils/maputils.go | 7 +- maputils/maputils_test.go | 4 +- protobuf/distribute/distribute.pb.go | 750 +++++++++------- protobuf/distribute/distribute.pb.gw.go | 443 ++++++++++ protobuf/distribute/distribute.proto | 97 ++- protobuf/index/index.go | 15 +- protobuf/index/index.pb.go | 1036 +++++++++++++++-------- protobuf/index/index.pb.gw.go | 510 +++++++++++ protobuf/index/index.proto | 131 ++- protobuf/management/management.pb.go | 408 ++++++--- protobuf/management/management.pb.gw.go | 379 +++++++++ protobuf/management/management.proto | 85 +- 80 files changed, 7539 insertions(+), 4737 deletions(-) create mode 100644 dispatcher/grpc_gateway.go create mode 100644 indexer/grpc_gateway.go delete mode 100644 indexer/raft_command.go create mode 100644 manager/grpc_gateway.go create mode 100644 manager/http_handler.go delete mode 100644 manager/http_router.go delete mode 100644 manager/raft_command.go create mode 100644 protobuf/distribute/distribute.pb.gw.go create mode 100644 protobuf/index/index.pb.gw.go create mode 100644 protobuf/management/management.pb.gw.go diff --git a/CHANGES.md b/CHANGES.md index da3402e..ce6fb19 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Use protobuf document #98 - Change node state to Node_SHUTDOWN in a error #99 - Fix a bug for waiting to receive an indexer cluster updates from the stream #100 +- Migrate to grpc-gateway #105 ## [v0.7.1] - 2019-07-18 diff --git a/Dockerfile b/Dockerfile index 4b91268..bda65ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -67,7 +67,7 @@ COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 2000 5000 8000 +EXPOSE 2000 5000 6000 8000 ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] CMD [ "blast", "--help" ] diff --git a/Makefile b/Makefile index ecda13d..c77e6f4 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,8 @@ PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) +GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) + ifeq ($(VERSION),) VERSION = latest endif @@ -44,7 +46,10 @@ endif .PHONY: protoc protoc: @echo ">> generating proto3 code" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done +# @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done .PHONY: format format: diff --git a/README.md b/README.md index e25d40d..beda7db 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,6 @@ $ ./compile_libs.sh $ sudo cp *.so /usr/local/lib ``` - ### macOS High Sierra Version 10.13.6 ```bash @@ -123,7 +122,6 @@ $ make \ You can enable all the Bleve extensions supported by Blast as follows: - ### Linux ```bash @@ -134,8 +132,7 @@ $ make \ build ``` - -#### macOS +### macOS ```bash $ make \ @@ -147,7 +144,6 @@ $ make \ build ``` - ### Build flags Please refer to the following table for details of Bleve Extensions: @@ -164,7 +160,6 @@ Please refer to the following table for details of Bleve Extensions: If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Installing dependencies section above. - ### Binaries You can see the binary file when build successful like so: @@ -186,7 +181,6 @@ $ make \ You can test with all the Bleve extensions supported by Blast as follows: - ### Linux ```bash @@ -197,8 +191,7 @@ $ make \ test ``` - -#### macOS +### macOS ```bash $ make \ @@ -223,8 +216,7 @@ $ make \ dist ``` - -#### macOS +### macOS ```bash $ make \ @@ -237,7 +229,6 @@ $ make \ ``` - ## Starting Blast in standalone mode ![standalone](https://user-images.githubusercontent.com/970948/59768879-138f5180-92e0-11e9-8b33-c7b1a93e0893.png) @@ -247,6 +238,7 @@ Running a Blast in standalone mode is easy. 
Start a indexer like so: ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -266,26 +258,28 @@ Please refer to following document for details of index mapping: You can check the node with the following command: ```bash -$ ./bin/blast indexer node info --grpc-address=:5000 +$ ./bin/blast indexer node info --grpc-address=:5000 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, - "metadata": { - "grpc_address": ":5000", - "http_address": ":8000" + "node": { + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "grpc_gateway_address": ":6000", + "http_address": ":8000" + } } } ``` You can now put, get, search and delete the documents via CLI. - ### Indexing a document via CLI For document indexing, execute the following command: @@ -293,209 +287,213 @@ For document indexing, execute the following command: ```bash $ ./bin/blast indexer index --grpc-address=:5000 enwiki_1 ' { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "fields": { + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" + } } -' +' | jq . ``` or ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . ``` You can see the result in JSON format. The result of the above command is: -```bash -1 +```json +{} ``` - ### Getting a document via CLI Getting a document is as following: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` - ### Searching documents via CLI Searching documents is as like following: ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json +$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] + "search_result": { + "status": { + "total": 1, + "failed": 0, + "successful": 1 }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } + "request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" ] }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, - "hits": [ - { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 
124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, + "fields": [ + "*" + ], + "facets": { + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null + "end": "2010-12-31T23:59:59Z", + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z" }, { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null + "end": "2020-12-31T23:59:59Z", + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z" } ] }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] + "Type count": { + "size": 10, + "field": "_type" } }, + "explain": false, "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" + "-_score", + "_id", + "-timestamp" ], - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } - } - ], - "total_hits": 1, - "max_score": 0.09703538256409851, - "took": 688819, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] + "includeLocations": false }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 + "hits": [ + { + "index": "/tmp/blast/indexer1/index", + "id": "enwiki_1", + "score": 0.09703538256409851, + "locations": { + "text_en": { + "search": [ + { + "pos": 2, + "start": 2, + "end": 8, + "array_positions": null + }, + { + "pos": 20, + "start": 118, + "end": 124, + "array_positions": null + }, + { + "pos": 33, + "start": 195, + "end": 201, + "array_positions": null + }, + { + "pos": 68, + "start": 415, + "end": 421, + "array_positions": null + }, + { + "pos": 73, + "start": 438, + "end": 444, + "array_positions": null + }, + { + "pos": 76, + "start": 458, + "end": 466, + "array_positions": null + } + ] + }, + "title_en": { + "search": [ + { + "pos": 1, + "start": 0, + "end": 6, + "array_positions": null + } + ] + } + }, + "sort": [ + "_score", + "enwiki_1", + " \u0001\u0015\u001f\u0004~80Pp\u0000" + ], + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" } - ] + } + ], + "total_hits": 1, + "max_score": 0.09703538256409851, + "took": 122105, + "facets": { + "Timestamp range": { + "field": "timestamp", + "total": 1, + "missing": 0, + "other": 0, + "date_ranges": [ + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z", + "count": 1 + } + ] + }, + "Type count": { + "field": "_type", + "total": 1, + "missing": 0, + "other": 0, + "terms": [ + { + "term": "enwiki", + "count": 1 + } + ] + } } } } @@ -508,7 +506,6 @@ Please refer to following document for details of search request and result: - https://github.com/blevesearch/bleve/blob/master/search.go#L267 - https://github.com/blevesearch/bleve/blob/master/search.go#L443 - ### Deleting a document via CLI Deleting a document is as following: @@ -519,38 +516,40 @@ $ ./bin/blast indexer delete --grpc-address=:5000 enwiki_1 You can see the result in JSON format. The result of the above command is: -```bash -1 +```json +{} ``` - ### Indexing documents in bulk via CLI Indexing documents in bulk, run the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk +$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk | jq . ``` You can see the result in JSON format. The result of the above command is: -```bash -36 +```json +{ + "count": 36 +} ``` - ### Deleting documents in bulk via CLI Deleting documents in bulk, run the following command: ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt +$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt | jq . ``` You can see the result in JSON format. 
The result of the above command is: -```bash -4 +```json +{ + "count": 36 +} ``` @@ -558,26 +557,33 @@ You can see the result in JSON format. The result of the above command is: Also you can do above commands via HTTP REST API that listened port 5002. - ### Indexing a document via HTTP REST API Indexing a document via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' +$ curl -X PUT 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "fields": { + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" + } } -' +' | jq . 
``` or ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json +$ curl -X PUT 'http://127.0.0.1:6000/v1/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json | jq . +``` + +You can see the result in JSON format. The result of the above command is: + +```json +{} ``` ### Getting a document via HTTP REST API @@ -585,43 +591,235 @@ $ curl -X PUT 'http://127.0.0.1:8000/documents' -H 'Content-Type: application/js Getting a document via HTTP is as following: ```bash -$ curl -X GET 'http://127.0.0.1:8000/documents/enwiki_1' +$ curl -X GET 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . ``` +You can see the result in JSON format. The result of the above command is: + +```json +{ + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } +} +``` ### Searching documents via HTTP REST API Searching documents via HTTP is as following: ```bash -$ curl -X POST 'http://127.0.0.1:8000/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json +$ curl -X POST 'http://127.0.0.1:6000/v1/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json | jq . ``` +You can see the result in JSON format. 
The result of the above command is: + +```json +{ + "search_result": { + "status": { + "total": 1, + "failed": 0, + "successful": 1 + }, + "request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" + ] + }, + "fields": [ + "*" + ], + "facets": { + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ + { + "end": "2010-12-31T23:59:59Z", + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z" + }, + { + "end": "2020-12-31T23:59:59Z", + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z" + } + ] + }, + "Type count": { + "size": 10, + "field": "_type" + } + }, + "explain": false, + "sort": [ + "-_score", + "_id", + "-timestamp" + ], + "includeLocations": false + }, + "hits": [ + { + "index": "/tmp/blast/indexer1/index", + "id": "enwiki_1", + "score": 0.09703538256409851, + "locations": { + "text_en": { + "search": [ + { + "pos": 2, + "start": 2, + "end": 8, + "array_positions": null + }, + { + "pos": 20, + "start": 118, + "end": 124, + "array_positions": null + }, + { + "pos": 33, + "start": 195, + "end": 201, + "array_positions": null + }, + { + "pos": 68, + "start": 415, + "end": 421, + "array_positions": null + }, + { + "pos": 73, + "start": 438, + "end": 444, + "array_positions": null + }, + { + "pos": 76, + "start": 458, + "end": 466, + "array_positions": null + } + ] + }, + "title_en": { + "search": [ + { + "pos": 1, + "start": 0, + "end": 6, + "array_positions": null + } + ] + } + }, + "sort": [ + "_score", + "enwiki_1", + " \u0001\u0015\u001f\u0004~80Pp\u0000" + ], + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } + } + ], + "total_hits": 1, + "max_score": 0.09703538256409851, + "took": 323568, + "facets": { + "Timestamp range": { + "field": "timestamp", + "total": 1, + "missing": 0, + "other": 0, + "date_ranges": [ + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z", + "count": 1 + } + ] + }, + "Type count": { + "field": "_type", + "total": 1, + "missing": 0, + "other": 0, + "terms": [ + { + "term": "enwiki", + "count": 1 + } + ] + } + } + } +} +``` ### Deleting a document via HTTP REST API Deleting a document via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:8000/documents/enwiki_1' +$ curl -X DELETE 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . ``` +You can see the result in JSON format. The result of the above command is: + +```json +{} +``` ### Indexing documents in bulk via HTTP REST API Indexing documents in bulk via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl +$ curl -X PUT 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl | jq . ``` +You can see the result in JSON format. 
The result of the above command is: + +```json +{ + "count": 36 +} +``` ### Deleting documents in bulk via HTTP REST API Deleting documents in bulk via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:8000/documents' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt +$ curl -X DELETE 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt | jq . +``` + +You can see the result in JSON format. The result of the above command is: + +```json +{ + "count": 36 +} ``` @@ -636,6 +834,7 @@ First of all, start a indexer in standalone. ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -652,6 +851,7 @@ Then, start two more indexers. $ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5010 \ + --grpc-gateway-address=:6010 \ --http-address=:8010 \ --node-id=indexer2 \ --node-address=:2010 \ @@ -661,6 +861,7 @@ $ ./bin/blast indexer start \ $ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5020 \ + --grpc-gateway-address=:6020 \ --http-address=:8020 \ --node-id=indexer3 \ --node-address=:2020 \ @@ -673,41 +874,51 @@ _Above example shows each Blast node running on the same host, so each node must This instructs each new node to join an existing node, specifying `--peer-addr=:5001`. Each node recognizes the joining clusters when started. So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the peers in the cluster with the following command: +```bash +$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +``` + +or ```bash -$ ./bin/blast indexer cluster info --grpc-address=:5000 +$ curl -X GET 'http://127.0.0.1:6000/v1/cluster/status' -H 'Content-Type: application/json' | jq . ``` You can see the result in JSON format. 
The result of the above command is: ```json { - "nodes": { - "indexer1": { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, - "metadata": { - "grpc_address": ":5000", - "http_address": ":8000" - } - }, - "indexer2": { - "id": "indexer2", - "bind_address": ":2010", - "state": 1, - "metadata": { - "grpc_address": ":5010", - "http_address": ":8010" - } - }, - "indexer3": { - "id": "indexer3", - "bind_address": ":2020", - "state": 1, - "metadata": { - "grpc_address": ":5020", - "http_address": ":8020" + "cluster": { + "nodes": { + "indexer1": { + "id": "indexer1", + "bind_address": ":2000", + "state": 1, + "metadata": { + "grpc_address": ":5000", + "grpc_gateway_address": ":6000", + "http_address": ":8000" + } + }, + "indexer2": { + "id": "indexer2", + "bind_address": ":2010", + "state": 1, + "metadata": { + "grpc_address": ":5010", + "grpc_gateway_address": ":6010", + "http_address": ":8010" + } + }, + "indexer3": { + "id": "indexer3", + "bind_address": ":2020", + "state": 3, + "metadata": { + "grpc_address": ":5020", + "grpc_gateway_address": ":6020", + "http_address": ":8020" + } } } } @@ -719,43 +930,45 @@ Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, da The following command indexes documents to any node in the cluster: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . ``` So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 -$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 | jq . +$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` @@ -772,13 +985,14 @@ Blast provides the following type of node for federation: - manager: Manager manage common index mappings to index across multiple indexers. It also manages information and status of clusters that participate in the federation. - dispatcher: Dispatcher is responsible for distributed search or indexing of each indexer. In the case of a index request, send document to each cluster based on the document ID. And in the case of a search request, the same query is sent to each cluster, then the search results are merged and returned to the client. -### Bring up the manager cluster. +### Bring up the manager cluster Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. 
```bash $ ./bin/blast manager start \ --grpc-address=:5100 \ + --grpc-gateway-address=:6100 \ --http-address=:8100 \ --node-id=manager1 \ --node-address=:2100 \ @@ -791,6 +1005,7 @@ $ ./bin/blast manager start \ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5110 \ + --grpc-gateway-address=:6110 \ --http-address=:8110 \ --node-id=manager2 \ --node-address=:2110 \ @@ -800,6 +1015,7 @@ $ ./bin/blast manager start \ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5120 \ + --grpc-gateway-address=:6120 \ --http-address=:8120 \ --node-id=manager3 \ --node-address=:2120 \ @@ -807,7 +1023,7 @@ $ ./bin/blast manager start \ --raft-storage-type=boltdb ``` -### Bring up the indexer cluster. +### Bring up the indexer cluster Federated mode differs from cluster mode that it specifies the manager in start up to bring up indexer cluster. The following example starts two 3-node clusters. @@ -817,6 +1033,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -827,6 +1044,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5010 \ + --grpc-gateway-address=:6010 \ --http-address=:8010 \ --node-id=indexer2 \ --node-address=:2010 \ @@ -837,6 +1055,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5020 \ + --grpc-gateway-address=:6020 \ --http-address=:8020 \ --node-id=indexer3 \ --node-address=:2020 \ @@ -847,6 +1066,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5030 \ + --grpc-gateway-address=:6030 \ --http-address=:8030 \ --node-id=indexer4 \ --node-address=:2030 \ @@ -857,6 +1077,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5040 \ + --grpc-gateway-address=:6040 \ 
--http-address=:8040 \ --node-id=indexer5 \ --node-address=:2040 \ @@ -867,6 +1088,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5050 \ + --grpc-gateway-address=:6050 \ --http-address=:8050 \ --node-id=indexer6 \ --node-address=:2050 \ @@ -874,7 +1096,7 @@ $ ./bin/blast indexer start \ --raft-storage-type=boltdb ``` -### Start up the dispatcher. +### Start up the dispatcher Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. @@ -882,29 +1104,32 @@ Finally, start the dispatcher with a manager that manages the target federation $ ./bin/blast dispatcher start \ --manager-grpc-address=:5100 \ --grpc-address=:5200 \ + --grpc-gateway-address=:6200 \ --http-address=:8200 ``` +### Check the cluster info + ```bash -$ ./bin/blast manager cluster info --grpc-address=:5100 -$ ./bin/blast indexer cluster info --grpc-address=:5000 -$ ./bin/blast indexer cluster info --grpc-address=:5040 +$ ./bin/blast manager cluster info --grpc-address=:5100 | jq . +$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +$ ./bin/blast indexer cluster info --grpc-address=:5030 | jq . +$ ./bin/blast manager get cluster --grpc-address=:5100 --format=json | jq . ``` ```bash -$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk +$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk | jq . ``` ```bash -$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json +$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json | jq . ``` ```bash -$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt +$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt | jq . 
``` - ## Blast on Docker ### Building Docker container image on localhost @@ -925,7 +1150,6 @@ $ docker pull mosuka/blast:latest See https://hub.docker.com/r/mosuka/blast/tags/ - ### Pulling Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: @@ -934,7 +1158,6 @@ You can also use the Docker container image already registered in docker.io like $ docker pull mosuka/blast:latest ``` - ### Running Indexer on Docker Running a Blast data node on Docker. Start Blast data node like so: @@ -943,10 +1166,12 @@ Running a Blast data node on Docker. Start Blast data node like so: $ docker run --rm --name blast-indexer1 \ -p 2000:2000 \ -p 5000:5000 \ + -p 6000:6000 \ -p 8000:8000 \ -v $(pwd)/example:/opt/blast/example \ mosuka/blast:latest blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=blast-indexer1 \ --node-address=:2000 \ @@ -968,7 +1193,6 @@ $ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 This section explain how to index Wikipedia dump to Blast. - ### Install wikiextractor ```bash @@ -976,14 +1200,12 @@ $ cd ${HOME} $ git clone git@github.com:attardi/wikiextractor.git ``` - ### Download wikipedia dump ```bash $ curl -o ~/tmp/enwiki-20190101-pages-articles.xml.bz2 https://dumps.wikimedia.org/enwiki/20190101/enwiki-20190101-pages-articles.xml.bz2 ``` - ### Parsing wikipedia dump ```bash @@ -991,12 +1213,12 @@ $ cd wikiextractor $ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles.xml.bz2 ``` - ### Starting Indexer ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -1015,7 +1237,8 @@ $ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) echo "Indexing ${FILE}" TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") DOCS=$(cat ${FILE} | jq -r '. 
+ {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -c) - curl -s -X PUT -H 'Content-Type: application/json' "http://127.0.0.1:8000/documents?bulk=true" --data-binary "${DOCS}" + curl -s -X PUT -H 'Content-Type: application/x-ndjson' "http://127.0.0.1:6000/v1/bulk" --data-binary "${DOCS}" + echo "" done ``` diff --git a/cmd/blast/dispatcher_delete.go b/cmd/blast/dispatcher_delete.go index 8466634..255e350 100644 --- a/cmd/blast/dispatcher_delete.go +++ b/cmd/blast/dispatcher_delete.go @@ -16,12 +16,13 @@ package main import ( "bufio" - "encoding/json" + "errors" "fmt" "io" "os" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -30,75 +31,95 @@ func dispatcherDelete(c *cli.Context) error { filePath := c.String("file") id := c.Args().Get(0) - ids := make([]string, 0) - - if id != "" { - ids = append(ids, id) + // create client + client, err := dispatcher.NewGRPCClient(grpcAddr) + if err != nil { + return err } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := dispatcher.JsonMarshaler{} - if filePath != "" { - _, err := os.Stat(filePath) + if id != "" { + req := &distribute.DeleteRequest{ + Id: id, + } + resp, err := client.Delete(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - - // read index mapping file - file, err := os.Open(filePath) + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - defer func() { - _ = file.Close() - }() + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + } else { + if filePath != "" { + ids := make([]string, 0) - reader := bufio.NewReader(file) - for { - docId, err := reader.ReadString('\n') + _, err := os.Stat(filePath) if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { 
- if docId != "" { - ids = append(ids, docId) - } - break + if os.IsNotExist(err) { + // does not exist + return err } + // other error + return err + } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { return err } + defer func() { + _ = file.Close() + }() + + reader := bufio.NewReader(file) + for { + docIdBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } + break + } - if docId != "" { - ids = append(ids, docId) + return err + } + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } } - } - } - // create client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() + req := &distribute.BulkDeleteRequest{ + Ids: ids, + } - result, err := client.DeleteDocument(ids) - if err != nil { - return err - } + resp, err := client.BulkDelete(req) + if err != nil { + return err + } - resultBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return err - } + resultBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/dispatcher_get.go b/cmd/blast/dispatcher_get.go index f46c7a7..cc01500 100644 --- a/cmd/blast/dispatcher_get.go +++ b/cmd/blast/dispatcher_get.go @@ -20,7 +20,7 @@ import ( "os" "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -43,17 +43,22 @@ func dispatcherGet(c *cli.Context) error { } }() - doc, err := client.GetDocument(id) + req := &distribute.GetRequest{ + Id: id, 
+ } + + res, err := client.Get(req) if err != nil { return err } - docBytes, err := index.MarshalDocument(doc) + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_index.go b/cmd/blast/dispatcher_index.go index 86c2dd7..59dd811 100644 --- a/cmd/blast/dispatcher_index.go +++ b/cmd/blast/dispatcher_index.go @@ -16,13 +16,18 @@ package main import ( "bufio" + "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -31,52 +36,62 @@ func dispatcherIndex(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") bulk := c.Bool("bulk") - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - docs := make([]*index.Document, 0) + // create gRPC client + client, err := dispatcher.NewGRPCClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := dispatcher.JsonMarshaler{} + + if c.NArg() >= 2 { + // index document by specifying ID and fields via standard input + id := c.Args().Get(0) + fieldsSrc := c.Args().Get(1) - if id != "" && fieldsSrc != "" { var fieldsMap map[string]interface{} err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) if err != nil { return err } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, 
doc) - docs = append(docs, doc) - } - if filePath != "" { - _, err := os.Stat(filePath) + req := &distribute.IndexRequest{ + Id: id, + Fields: fieldsAny, + } + + res, err := client.Index(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - // read index mapping file - file, err := os.Open(filePath) + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - defer func() { - _ = file.Close() - }() + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else if c.NArg() == 1 { + // index document by specifying document(s) via standard input + docSrc := c.Args().Get(0) if bulk { - reader := bufio.NewReader(file) + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) for { docBytes, err := reader.ReadBytes('\n') if err != nil { @@ -102,44 +117,157 @@ func dispatcherIndex(c *cli.Context) error { docs = append(docs, doc) } } + + req := &distribute.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } else { - docBytes, err := ioutil.ReadAll(file) + // json + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) if err != nil { return err } - docs = append(docs, doc) - } - } - // create gRPC client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) + req := &distribute.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err 
:= client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } - }() + } else { + // index document by specifying document(s) via file + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } - // index documents in bulk - count, err := client.IndexDocument(docs) - if err != nil { - return err - } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() - resultBytes, err := json.MarshalIndent(count, "", " ") - if err != nil { - return err - } + if bulk { + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + + req := &distribute.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else { + // json + docBytes, err := ioutil.ReadAll(file) + if err != nil { + return err + } + var docMap map[string]interface{} + err = json.Unmarshal(docBytes, &docMap) + if err != nil { + return err + } + + fieldsAny := &any.Any{} + err = 
protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) + if err != nil { + return err + } + + req := &distribute.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go index 5fb1b8f..6594ffe 100644 --- a/cmd/blast/dispatcher_node_health.go +++ b/cmd/blast/dispatcher_node_health.go @@ -18,9 +18,8 @@ import ( "fmt" "os" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -41,30 +40,40 @@ func dispatcherNodeHealth(c *cli.Context) error { } }() - var state string + var res *distribute.NodeHealthCheckResponse if healthiness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_LIVENESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_DEAD.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_READINESS.String()) + 
req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_NOT_READY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_search.go b/cmd/blast/dispatcher_search.go index 976e36a..bf6ccda 100644 --- a/cmd/blast/dispatcher_search.go +++ b/cmd/blast/dispatcher_search.go @@ -16,23 +16,27 @@ package main import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) func dispatcherSearch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - searchRequestPath := c.String("file") + filePath := c.String("file") searchRequest := bleve.NewSearchRequest(nil) - if searchRequestPath != "" { - _, err := os.Stat(searchRequestPath) + if filePath != "" { + _, err := os.Stat(filePath) if err != nil { if os.IsNotExist(err) { // does not exist @@ -43,23 +47,36 @@ func dispatcherSearch(c *cli.Context) error { } // open file - searchRequestFile, err := 
os.Open(searchRequestPath) + file, err := os.Open(filePath) if err != nil { return err } defer func() { - _ = searchRequestFile.Close() + _ = file.Close() }() // read file - searchRequestBytes, err := ioutil.ReadAll(searchRequestFile) + fileBytes, err := ioutil.ReadAll(file) if err != nil { return err } // create search request - if searchRequestBytes != nil { - err := json.Unmarshal(searchRequestBytes, searchRequest) + if fileBytes != nil { + var tmpValue map[string]interface{} + err = json.Unmarshal(fileBytes, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + err = json.Unmarshal(searchRequestBytes, &searchRequest) if err != nil { return err } @@ -77,17 +94,26 @@ func dispatcherSearch(c *cli.Context) error { } }() - searchResult, err := client.Search(searchRequest) + searchRequestAny := &any.Any{} + err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + return err + } + + req := &distribute.SearchRequest{SearchRequest: searchRequestAny} + + res, err := client.Search(req) if err != nil { return err } - jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_start.go b/cmd/blast/dispatcher_start.go index 534bea1..4b61df3 100644 --- a/cmd/blast/dispatcher_start.go +++ b/cmd/blast/dispatcher_start.go @@ -28,6 +28,7 @@ func dispatcherStart(c *cli.Context) error { managerAddr := c.String("manager-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") 
logLevel := c.GlobalString("log-level") @@ -79,7 +80,7 @@ func dispatcherStart(c *cli.Context) error { httpLogCompress, ) - svr, err := dispatcher.NewServer(managerAddr, grpcAddr, httpAddr, logger, grpcLogger, httpAccessLogger) + svr, err := dispatcher.NewServer(managerAddr, grpcAddr, grpcGatewayAddr, httpAddr, logger, grpcLogger, httpAccessLogger) if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go index 434c011..7963655 100644 --- a/cmd/blast/indexer_cluster_info.go +++ b/cmd/blast/indexer_cluster_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -37,17 +37,20 @@ func indexerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + + resp, err := client.ClusterInfo(req) if err != nil { return err } - clusterBytes, err := json.MarshalIndent(cluster, "", " ") + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) return nil } diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go index e564256..0793229 100644 --- a/cmd/blast/indexer_cluster_leave.go +++ b/cmd/blast/indexer_cluster_leave.go @@ -19,6 +19,7 @@ import ( "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -46,10 +47,22 @@ func indexerClusterLeave(c *cli.Context) error { } }() - err = client.ClusterLeave(nodeId) + req := &index.ClusterLeaveRequest{ + Id: nodeId, + } + + resp, err := client.ClusterLeave(req) if err != nil { return err } + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } + + _, _ = 
fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + return nil } diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go index ba99bdb..a991b34 100644 --- a/cmd/blast/indexer_cluster_watch.go +++ b/cmd/blast/indexer_cluster_watch.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "fmt" "io" "log" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" @@ -40,28 +40,31 @@ func indexerClusterWatch(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + marshaler := indexer.JsonMarshaler{} + + req := &empty.Empty{} + clusterInfo, err := client.ClusterInfo(req) if err != nil { return err } resp := &index.ClusterWatchResponse{ Event: 0, Node: nil, - Cluster: cluster, + Cluster: clusterInfo.Cluster, } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - watchClient, err := client.ClusterWatch() + clusterWatchClient, err := client.ClusterWatch(req) if err != nil { return err } for { - resp, err := watchClient.Recv() + resp, err := clusterWatchClient.Recv() if err == io.EOF { break } @@ -69,12 +72,11 @@ func indexerClusterWatch(c *cli.Context) error { log.Println(err.Error()) break } - - clusterBytes, err = json.MarshalIndent(resp, "", " ") + respBytes, err = marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) } return nil diff --git a/cmd/blast/indexer_delete.go b/cmd/blast/indexer_delete.go index 7c1e1bd..b8aa834 100644 --- a/cmd/blast/indexer_delete.go +++ b/cmd/blast/indexer_delete.go @@ -16,12 +16,13 @@ package main import ( "bufio" - 
"encoding/json" + "errors" "fmt" "io" "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -30,75 +31,95 @@ func indexerDelete(c *cli.Context) error { filePath := c.String("file") id := c.Args().Get(0) - ids := make([]string, 0) - - if id != "" { - ids = append(ids, id) + // create client + client, err := indexer.NewGRPCClient(grpcAddr) + if err != nil { + return err } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := indexer.JsonMarshaler{} - if filePath != "" { - _, err := os.Stat(filePath) + if id != "" { + req := &index.DeleteRequest{ + Id: id, + } + resp, err := client.Delete(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - - // read index mapping file - file, err := os.Open(filePath) + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - defer func() { - _ = file.Close() - }() + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + } else { + if filePath != "" { + ids := make([]string, 0) - reader := bufio.NewReader(file) - for { - docId, err := reader.ReadString('\n') + _, err := os.Stat(filePath) if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId != "" { - ids = append(ids, docId) - } - break + if os.IsNotExist(err) { + // does not exist + return err } + // other error + return err + } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { return err } + defer func() { + _ = file.Close() + }() + + reader := bufio.NewReader(file) + for { + docIdBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } + break + } - if docId != "" { - ids = append(ids, docId) + return err + } + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } } - 
} - } - // create client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() + req := &index.BulkDeleteRequest{ + Ids: ids, + } - result, err := client.DeleteDocument(ids) - if err != nil { - return err - } + resp, err := client.BulkDelete(req) + if err != nil { + return err + } - resultBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return err - } + resultBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go index 53abb27..976e4be 100644 --- a/cmd/blast/indexer_get.go +++ b/cmd/blast/indexer_get.go @@ -43,17 +43,22 @@ func indexerGet(c *cli.Context) error { } }() - doc, err := client.GetDocument(id) + req := &index.GetRequest{ + Id: id, + } + + resp, err := client.Get(req) if err != nil { return err } - docBytes, err := index.MarshalDocument(doc) + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) return nil } diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go index c70e6ea..7f5521c 100644 --- a/cmd/blast/indexer_index.go +++ b/cmd/blast/indexer_index.go @@ -16,15 +16,18 @@ package main import ( "bufio" + "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" - "github.com/mosuka/blast/protobuf/index" - + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ 
-32,52 +35,62 @@ func indexerIndex(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") bulk := c.Bool("bulk") - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - docs := make([]*index.Document, 0) + // create gRPC client + client, err := indexer.NewGRPCClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() - if id != "" && fieldsSrc != "" { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) + marshaler := indexer.JsonMarshaler{} + + if c.NArg() >= 2 { + // index document by specifying ID and fields via standard input + id := c.Args().Get(0) + docSrc := c.Args().Get(1) + + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"], fieldsAny) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - docs = append(docs, doc) - } - if filePath != "" { - _, err := os.Stat(filePath) + req := &index.IndexRequest{ + Id: id, + Fields: fieldsAny, + } + + res, err := client.Index(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - // read index mapping file - file, err := os.Open(filePath) + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - defer func() { - _ = file.Close() - }() + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else if c.NArg() == 1 { + // index document by specifying document(s) via standard input + docSrc := c.Args().Get(0) if bulk { - reader := bufio.NewReader(file) + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) 
for { docBytes, err := reader.ReadBytes('\n') if err != nil { @@ -103,44 +116,157 @@ func indexerIndex(c *cli.Context) error { docs = append(docs, doc) } } + + req := &index.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } else { - docBytes, err := ioutil.ReadAll(file) + // json + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) if err != nil { return err } - docs = append(docs, doc) - } - } - // create gRPC client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) + req := &index.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } - }() + } else { + // index document by specifying document(s) via file + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } - // index documents in bulk - count, err := client.IndexDocument(docs) - if err != nil { - return err - } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() - resultBytes, err := json.MarshalIndent(count, "", " ") - if err != nil { - return err - } + if bulk { + // jsonl + docs := 
make([]*index.Document, 0) + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + + req := &index.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else { + // json + docBytes, err := ioutil.ReadAll(file) + if err != nil { + return err + } + var docMap map[string]interface{} + err = json.Unmarshal(docBytes, &docMap) + if err != nil { + return err + } + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) + if err != nil { + return err + } + + req := &index.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go index aedb6eb..e818992 100644 --- a/cmd/blast/indexer_node_health.go +++ b/cmd/blast/indexer_node_health.go @@ -40,30 +40,40 @@ func indexerNodeHealth(c *cli.Context) error { } }() - var state string + var res *index.NodeHealthCheckResponse if healthiness { - state, err = 
client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_UNHEALTHY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_DEAD.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_NOT_READY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_UNHEALTHY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go index 0ab3ad5..610403f 100644 --- a/cmd/blast/indexer_node_info.go +++ b/cmd/blast/indexer_node_info.go @@ -15,10 
+15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -37,12 +37,16 @@ func indexerNodeInfo(c *cli.Context) error { } }() - node, err := client.NodeInfo() + req := &empty.Empty{} + + res, err := client.NodeInfo(req) if err != nil { return err } - nodeBytes, err := json.MarshalIndent(node, "", " ") + marshaler := indexer.JsonMarshaler{} + + nodeBytes, err := marshaler.Marshal(res) if err != nil { return err } diff --git a/cmd/blast/indexer_search.go b/cmd/blast/indexer_search.go index 5c250dc..2a7d4b0 100644 --- a/cmd/blast/indexer_search.go +++ b/cmd/blast/indexer_search.go @@ -16,23 +16,27 @@ package main import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerSearch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - searchRequestPath := c.String("file") + filePath := c.String("file") searchRequest := bleve.NewSearchRequest(nil) - if searchRequestPath != "" { - _, err := os.Stat(searchRequestPath) + if filePath != "" { + _, err := os.Stat(filePath) if err != nil { if os.IsNotExist(err) { // does not exist @@ -43,23 +47,36 @@ func indexerSearch(c *cli.Context) error { } // open file - searchRequestFile, err := os.Open(searchRequestPath) + file, err := os.Open(filePath) if err != nil { return err } defer func() { - _ = searchRequestFile.Close() + _ = file.Close() }() // read file - searchRequestBytes, err := ioutil.ReadAll(searchRequestFile) + fileBytes, err := ioutil.ReadAll(file) if err != nil { return err } // create search request - if searchRequestBytes != nil { - err := json.Unmarshal(searchRequestBytes, searchRequest) + if fileBytes != nil { + var tmpValue map[string]interface{} + err = json.Unmarshal(fileBytes, 
&tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("search_request does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + err = json.Unmarshal(searchRequestBytes, &searchRequest) if err != nil { return err } @@ -77,17 +94,26 @@ func indexerSearch(c *cli.Context) error { } }() - searchResult, err := client.Search(searchRequest) + searchRequestAny := &any.Any{} + err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + return err + } + + req := &index.SearchRequest{SearchRequest: searchRequestAny} + + res, err := client.Search(req) if err != nil { return err } - jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") + marshaler := indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/indexer_snapshot.go b/cmd/blast/indexer_snapshot.go index c34459a..bad2cf5 100644 --- a/cmd/blast/indexer_snapshot.go +++ b/cmd/blast/indexer_snapshot.go @@ -18,6 +18,7 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -36,10 +37,20 @@ func indexerSnapshot(c *cli.Context) error { } }() - err = client.Snapshot() + req := &empty.Empty{} + + res, err := client.Snapshot(req) + if err != nil { + return err + } + + marshaler := indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go index b20e689..d01b076 100644 --- a/cmd/blast/indexer_start.go +++ b/cmd/blast/indexer_start.go @@ -33,6 +33,7 @@ func indexerStart(c *cli.Context) error { 
peerGRPCAddr := c.String("peer-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") nodeId := c.String("node-id") @@ -98,8 +99,9 @@ func indexerStart(c *cli.Context) error { BindAddress: nodeAddr, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddr, - HttpAddress: httpAddr, + GrpcAddress: grpcAddr, + GrpcGatewayAddress: grpcGatewayAddr, + HttpAddress: httpAddr, }, } diff --git a/cmd/blast/main.go b/cmd/blast/main.go index e889f32..7183f17 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -57,6 +57,12 @@ func main() { EnvVar: "BLAST_MANAGER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6100", + EnvVar: "BLAST_MANAGER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8100", @@ -399,6 +405,12 @@ func main() { EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6000", + EnvVar: "BLAST_INDEXER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8000", @@ -758,6 +770,12 @@ func main() { EnvVar: "BLAST_DISPATCHER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6200", + EnvVar: "BLAST_DISPATCHER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8200", diff --git a/cmd/blast/manager_cluster_info.go b/cmd/blast/manager_cluster_info.go index 2ccc08d..8b0a25a 100644 --- a/cmd/blast/manager_cluster_info.go +++ b/cmd/blast/manager_cluster_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -37,17 +37,19 @@ func 
managerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { return err } - clusterBytes, err := json.MarshalIndent(cluster, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go index 408f0ec..12ae8e1 100644 --- a/cmd/blast/manager_cluster_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -19,6 +19,7 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -42,10 +43,21 @@ func managerClusterLeave(c *cli.Context) error { } }() - err = client.ClusterLeave(nodeId) + req := &management.ClusterLeaveRequest{ + Id: nodeId, + } + res, err := client.ClusterLeave(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index 775350b..320965c 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "fmt" "io" "log" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" @@ -40,28 +40,31 @@ func managerClusterWatch(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + marshaler := manager.JsonMarshaler{} + + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { return err } resp := &management.ClusterWatchResponse{ 
Event: 0, Node: nil, - Cluster: cluster, + Cluster: res.Cluster, } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + resBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - watchClient, err := client.ClusterWatch() + watchClient, err := client.ClusterWatch(req) if err != nil { return err } for { - resp, err := watchClient.Recv() + resp, err = watchClient.Recv() if err == io.EOF { break } @@ -70,11 +73,11 @@ func managerClusterWatch(c *cli.Context) error { break } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + resBytes, err = marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } return nil diff --git a/cmd/blast/manager_delete.go b/cmd/blast/manager_delete.go index e6c41e6..0caf391 100644 --- a/cmd/blast/manager_delete.go +++ b/cmd/blast/manager_delete.go @@ -20,6 +20,7 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -43,10 +44,21 @@ func managerDelete(c *cli.Context) error { } }() - err = client.Delete(key) + req := &management.DeleteRequest{ + Key: key, + } + res, err := client.Delete(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_get.go b/cmd/blast/manager_get.go index f0e3fe7..6b41f0e 100644 --- a/cmd/blast/manager_get.go +++ b/cmd/blast/manager_get.go @@ -15,11 +15,11 @@ package main import ( - "encoding/json" "fmt" "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -39,16 +39,22 @@ 
func managerGet(c *cli.Context) error { } }() - value, err := client.Get(key) + req := &management.GetRequest{ + Key: key, + } + + res, err := client.Get(req) if err != nil { return err } - valueBytes, err := json.MarshalIndent(value, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(valueBytes))) + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_node_health.go b/cmd/blast/manager_node_health.go index 9967f91..e2eb209 100644 --- a/cmd/blast/manager_node_health.go +++ b/cmd/blast/manager_node_health.go @@ -40,30 +40,40 @@ func managerNodeHealthCheck(c *cli.Context) error { } }() - var state string + var res *management.NodeHealthCheckResponse if healthiness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_UNHEALTHY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_DEAD.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = 
management.NodeHealthCheckResponse_NOT_READY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_UNHEALTHY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index 85314a2..ca190e1 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -37,17 +37,19 @@ func managerNodeInfo(c *cli.Context) error { } }() - node, err := client.NodeInfo() + req := &empty.Empty{} + res, err := client.NodeInfo(req) if err != nil { return err } - nodeBytes, err := json.MarshalIndent(node, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_set.go b/cmd/blast/manager_set.go index 02c3fa1..f7bdac8 100644 --- a/cmd/blast/manager_set.go +++ b/cmd/blast/manager_set.go @@ -20,7 +20,10 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/manager" + 
"github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -61,10 +64,29 @@ func managerSet(c *cli.Context) error { } }() - err = client.Set(key, value) + valueAny := &any.Any{} + err = protobuf.UnmarshalAny(value, valueAny) if err != nil { return err } + req := &management.SetRequest{ + Key: key, + Value: valueAny, + } + + res, err := client.Set(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_snapshot.go b/cmd/blast/manager_snapshot.go index 8dd9b71..f252e34 100644 --- a/cmd/blast/manager_snapshot.go +++ b/cmd/blast/manager_snapshot.go @@ -18,6 +18,7 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -36,10 +37,19 @@ func managerSnapshot(c *cli.Context) error { } }() - err = client.Snapshot() + req := &empty.Empty{} + res, err := client.Snapshot(req) if err != nil { return err } + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go index 81385ab..94a7445 100644 --- a/cmd/blast/manager_start.go +++ b/cmd/blast/manager_start.go @@ -31,6 +31,7 @@ func managerStart(c *cli.Context) error { peerGrpcAddr := c.String("peer-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") nodeId := c.String("node-id") @@ -96,8 +97,9 @@ func managerStart(c *cli.Context) error { BindAddress: nodeAddr, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddr, - HttpAddress: httpAddr, + GrpcAddress: 
grpcAddr, + GrpcGatewayAddress: grpcGatewayAddr, + HttpAddress: httpAddr, }, } diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go index bab09af..ff010df 100644 --- a/cmd/blast/manager_watch.go +++ b/cmd/blast/manager_watch.go @@ -15,15 +15,13 @@ package main import ( - "encoding/json" - "errors" "fmt" "io" "log" "os" "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -43,11 +41,16 @@ func managerWatch(c *cli.Context) error { } }() - watchClient, err := client.Watch(key) + req := &management.WatchRequest{ + Key: key, + } + watchClient, err := client.Watch(req) if err != nil { return err } + marshaler := manager.JsonMarshaler{} + for { resp, err := watchClient.Recv() if err == io.EOF { @@ -58,29 +61,13 @@ func managerWatch(c *cli.Context) error { break } - value, err := protobuf.MarshalAny(resp.Value) + respBytes, err := marshaler.Marshal(resp) if err != nil { - return err - } - if value == nil { - return errors.New("nil") + log.Println(err.Error()) + break } - var valueBytes []byte - switch value.(type) { - case *map[string]interface{}: - valueMap := *value.(*map[string]interface{}) - valueBytes, err = json.Marshal(valueMap) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, string(valueBytes))) - case *string: - valueStr := *value.(*string) - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %s", resp.Command.String(), resp.Key, valueStr)) - default: - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, &value)) - } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) } return nil diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go index b7cf07d..5ca4658 100644 --- a/dispatcher/grpc_client.go +++ b/dispatcher/grpc_client.go @@ -16,18 +16,11 @@ package dispatcher import ( "context" - "errors" "math" - 
"github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type GRPCClient struct { @@ -96,134 +89,30 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &distribute.NodeHealthCheckRequest{} - - switch probe { - case distribute.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS - case distribute.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_LIVENESS - case distribute.NodeHealthCheckRequest_READINESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_READINESS - default: - req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return distribute.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *distribute.NodeHealthCheckRequest, opts ...grpc.CallOption) (*distribute.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { - req := &distribute.GetDocumentRequest{ - Id: id, - } - - resp, err := c.client.GetDocument(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - return resp.Document, nil +func (c *GRPCClient) Get(req *distribute.GetRequest, opts ...grpc.CallOption) (*distribute.GetResponse, error) { + return c.client.Get(c.ctx, req, opts...) } -func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { - // bleve.SearchRequest -> Any - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return nil, err - } - - req := &distribute.SearchRequest{ - SearchRequest: searchRequestAny, - } - - resp, err := c.client.Search(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - // Any -> bleve.SearchResult - searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - if searchResultInstance == nil { - return nil, errors.New("nil") - } - searchResult := searchResultInstance.(*bleve.SearchResult) - - return searchResult, nil +func (c *GRPCClient) Index(req *distribute.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Index(c.ctx, req, opts...) } -func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.IndexDocument(c.ctx, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, doc := range docs { - req := &distribute.IndexDocumentRequest{ - Document: doc, - } - - err = stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) Delete(req *distribute.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Delete(c.ctx, req, opts...) } -func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.DeleteDocument(c.ctx, opts...) - if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, id := range ids { - req := &distribute.DeleteDocumentRequest{ - Id: id, - } - - err := stream.Send(req) - if err != nil { - return -1, err - } - } +func (c *GRPCClient) BulkIndex(req *distribute.BulkIndexRequest, opts ...grpc.CallOption) (*distribute.BulkIndexResponse, error) { + return c.client.BulkIndex(c.ctx, req, opts...) +} - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } +func (c *GRPCClient) BulkDelete(req *distribute.BulkDeleteRequest, opts ...grpc.CallOption) (*distribute.BulkDeleteResponse, error) { + return c.client.BulkDelete(c.ctx, req, opts...) +} - return int(resp.Count), nil +func (c *GRPCClient) Search(req *distribute.SearchRequest, opts ...grpc.CallOption) (*distribute.SearchResponse, error) { + return c.client.Search(c.ctx, req, opts...) } diff --git a/dispatcher/grpc_gateway.go b/dispatcher/grpc_gateway.go new file mode 100644 index 0000000..f962b4e --- /dev/null +++ b/dispatcher/grpc_gateway.go @@ -0,0 +1,353 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dispatcher + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/protobuf/index" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *distribute.GetResponse: + value, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "fields": value, + }, + ) + case *distribute.SearchResponse: + value, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "search_result": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". 
+func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.IndexRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + id, ok := tmpValue["id"].(string) + if ok { + v.(*distribute.IndexRequest).Id = id + } + + fields, ok := tmpValue["fields"] + if !ok { + return errors.New("value does not exist") + } + v.(*distribute.IndexRequest).Fields = &any.Any{} + return protobuf.UnmarshalAny(fields, v.(*distribute.IndexRequest).Fields) + case *distribute.SearchRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + var searchRequest *bleve.SearchRequest + err = json.Unmarshal(searchRequestBytes, &searchRequest) + if err != nil { + return err + } + v.(*distribute.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*distribute.SearchRequest).SearchRequest) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type JsonlMarshaler struct{} + +// ContentType always Returns "application/json". 
+func (*JsonlMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". +func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.BulkIndexRequest: + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + v.(*distribute.BulkIndexRequest).Documents = docs + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonlMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type TextMarshaler struct{} + +// ContentType always Returns "application/json". 
+func (*TextMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads text stream from "r". +func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.BulkDeleteRequest: + ids := make([]string, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + //idBytes, err := reader.ReadBytes('\n') + idBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + break + } + } + + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + } + v.(*distribute.BulkDeleteRequest).Ids = ids + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *TextMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), + runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := distribute.RegisterDistributeHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index e49e63d..0657119 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -28,6 +28,7 @@ import ( "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" 
"github.com/mosuka/blast/indexer" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" @@ -145,13 +146,14 @@ func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster return nil, err } - managers, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { s.logger.Error(err.Error()) return nil, err } - return managers, nil + return res.Cluster, nil } func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { @@ -191,7 +193,8 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } // create stream for watching cluster changes - stream, err := client.ClusterWatch() + req := &empty.Empty{} + stream, err := client.ClusterWatch(req) if err != nil { s.logger.Error(err.Error()) continue @@ -199,10 +202,10 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Info("wait for receive a manager cluster updates from stream") resp, err := stream.Recv() - //if err == io.EOF { - // s.logger.Info(err.Error()) - // continue - //} + if err == io.EOF { + s.logger.Info(err.Error()) + continue + } if err != nil { s.logger.Error(err.Error()) continue @@ -318,31 +321,36 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } // get initial indexers - shards, err := client.Get("/cluster/shards") + req := &management.GetRequest{ + Key: "/cluster/shards", + } + res, err := client.Get(req) if err != nil { s.logger.Fatal(err.Error()) return } - if shards == nil { + if res.Value == nil { s.logger.Error("/cluster/shards is nil") } - for shardId, shardIntr := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shardIntr) + + shards, err := protobuf.MarshalAny(res.Value) + for shardId, shard := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shard) if err != nil { s.logger.Error(err.Error()) continue } - var shard *index.Cluster - err = 
json.Unmarshal(shardBytes, &shard) + var cluster *index.Cluster + err = json.Unmarshal(shardBytes, &cluster) if err != nil { s.logger.Error(err.Error()) continue } - s.indexers[shardId] = shard + s.indexers[shardId] = cluster - for nodeId, node := range shard.Nodes { + for nodeId, node := range cluster.Nodes { if node.Metadata.GrpcAddress == "" { s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue @@ -371,7 +379,10 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { continue } - stream, err := client.Watch("/cluster/shards/") + watchReq := &management.WatchRequest{ + Key: "/cluster/shards/", + } + stream, err := client.Watch(watchReq) if err != nil { s.logger.Error(err.Error()) continue @@ -388,37 +399,42 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - shardsIntr, err := client.Get("/cluster/shards/") + getReq := &management.GetRequest{ + Key: "/cluster/shards/", + } + res, err := client.Get(getReq) if err != nil { s.logger.Error(err.Error()) continue } - if shardsIntr == nil { + if res.Value == nil { s.logger.Error("/cluster/shards is nil") continue } - for shardId, shardIntr := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shardIntr) + + shards, err := protobuf.MarshalAny(res.Value) + for shardId, shard := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shard) if err != nil { s.logger.Error(err.Error()) continue } - var shard *index.Cluster - err = json.Unmarshal(shardBytes, &shard) + var cluster *index.Cluster + err = json.Unmarshal(shardBytes, &cluster) if err != nil { s.logger.Error(err.Error()) continue } - s.indexers[shardId] = shard + s.indexers[shardId] = cluster if _, exist := s.indexerClients[shardId]; !exist { s.indexerClients[shardId] = 
make(map[string]*indexer.GRPCClient) } // open clients for indexer nodes - for nodeId, node := range shard.Nodes { + for nodeId, node := range cluster.Nodes { if node.Metadata.GrpcAddress == "" { s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue @@ -514,18 +530,24 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeH resp := &distribute.NodeHealthCheckResponse{} switch req.Probe { + case distribute.NodeHealthCheckRequest_UNKNOWN: + fallthrough case distribute.NodeHealthCheckRequest_HEALTHINESS: resp.State = distribute.NodeHealthCheckResponse_HEALTHY case distribute.NodeHealthCheckRequest_LIVENESS: resp.State = distribute.NodeHealthCheckResponse_ALIVE case distribute.NodeHealthCheckRequest_READINESS: resp.State = distribute.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil } -func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocumentRequest) (*distribute.GetDocumentResponse, error) { +func (s *GRPCService) Get(ctx context.Context, req *distribute.GetRequest) (*distribute.GetResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -537,7 +559,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume type respVal struct { clusterId string - doc *index.Document + res *index.GetResponse err error } @@ -549,11 +571,15 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume wg.Add(1) go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { // index documents - doc, err := client.GetDocument(id) + req := &index.GetRequest{ + Id: id, + } + res, err := client.Get(req) + wg.Done() respChan <- respVal{ clusterId: clusterId, - doc: doc, + res: res, err: err, } }(clusterId, client, 
req.Id, respChan) @@ -564,28 +590,35 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume close(respChan) // summarize responses - var doc *index.Document + iRes := &index.GetResponse{} for r := range respChan { - if r.doc != nil { - doc = r.doc + if r.res != nil { + iRes = r.res } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - resp := &distribute.GetDocumentResponse{} - - // response - resp.Document = doc + resp := &distribute.GetResponse{ + Fields: iRes.Fields, + } return resp, nil } -func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { - start := time.Now() +func (s *GRPCService) docIdHash(docId string) uint64 { + hash := fnv.New64() + _, err := hash.Write([]byte(docId)) + if err != nil { + return 0 + } - resp := &distribute.SearchResponse{} + return hash.Sum64() +} + +func (s *GRPCService) Index(ctx context.Context, req *distribute.IndexRequest) (*empty.Empty, error) { + res := &empty.Empty{} indexerClients := s.getIndexerClients() @@ -596,127 +629,73 @@ func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) sort.Strings(clusterIds) } - type respVal struct { - clusterId string - searchResult *bleve.SearchResult - err error - } + docIdHash := s.docIdHash(req.Id) + clusterNum := uint64(len(indexerClients)) + clusterId := clusterIds[int(docIdHash%clusterNum)] - // create response channel - respChan := make(chan respVal, len(clusterIds)) + iReq := &index.IndexRequest{ + Id: req.Id, + Fields: req.Fields, + } - // create search request - ins, err := protobuf.MarshalAny(req.SearchRequest) + res, err := indexerClients[clusterId].Index(iReq) if err != nil { s.logger.Error(err.Error()) - return resp, err + return res, status.Error(codes.Internal, err.Error()) } - searchRequest := ins.(*bleve.SearchRequest) - // change to distributed search request - from := searchRequest.From - size := searchRequest.Size 
- searchRequest.From = 0 - searchRequest.Size = from + size + return res, nil +} + +func (s *GRPCService) Delete(ctx context.Context, req *distribute.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + indexerClients := s.getIndexerClients() + + // cluster id list sorted by cluster id + clusterIds := make([]string, 0) + for clusterId := range indexerClients { + clusterIds = append(clusterIds, clusterId) + sort.Strings(clusterIds) + } + + type respVal struct { + clusterId string + err error + } + + // create response channel + respChan := make(chan respVal, len(clusterIds)) wg := &sync.WaitGroup{} for clusterId, client := range indexerClients { wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { - searchResult, err := client.Search(searchRequest) + go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { + // index documents + iReq := &index.DeleteRequest{Id: id} + _, err := client.Delete(iReq) wg.Done() respChan <- respVal{ - clusterId: clusterId, - searchResult: searchResult, - err: err, + clusterId: clusterId, + err: err, } - }(clusterId, client, searchRequest, respChan) + }(clusterId, client, req.Id, respChan) } wg.Wait() // close response channel close(respChan) - // revert to original search request - searchRequest.From = from - searchRequest.Size = size - - // summarize responses - var searchResult *bleve.SearchResult for r := range respChan { - if r.searchResult != nil { - if searchResult == nil { - searchResult = r.searchResult - } else { - searchResult.Merge(r.searchResult) - } - } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - // handle case where no results were successful - if searchResult == nil { - searchResult = &bleve.SearchResult{ - Status: &bleve.SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - // sort all hits with the requested order - if len(searchRequest.Sort) 
> 0 { - sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) - sort.Sort(sorter) - } - - // now skip over the correct From - if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { - searchResult.Hits = searchResult.Hits[searchRequest.From:] - } else if searchRequest.From > 0 { - searchResult.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { - searchResult.Hits = searchResult.Hits[0:searchRequest.Size] - } - - // fix up facets - for name, fr := range searchRequest.Facets { - searchResult.Facets.Fixup(name, fr.Size) - } - - // fix up original request - searchResult.Request = searchRequest - searchDuration := time.Since(start) - searchResult.Took = searchDuration - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.SearchResult = searchResultAny - return resp, nil } -func (s *GRPCService) docIdHash(docId string) uint64 { - hash := fnv.New64() - _, err := hash.Write([]byte(docId)) - if err != nil { - return 0 - } - - return hash.Sum64() -} - -func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentServer) error { +func (s *GRPCService) BulkIndex(ctx context.Context, req *distribute.BulkIndexRequest) (*distribute.BulkIndexResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -732,27 +711,17 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe docSet[clusterId] = make([]*index.Document, 0) } - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - + for _, doc := range req.Documents { // distribute documents to each cluster based on 
document id - docIdHash := s.docIdHash(req.Document.Id) + docIdHash := s.docIdHash(doc.Id) clusterNum := uint64(len(indexerClients)) clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], req.Document) + docSet[clusterId] = append(docSet[clusterId], doc) } type respVal struct { clusterId string - count int + res *index.BulkIndexResponse err error } @@ -763,11 +732,14 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe for clusterId, docs := range docSet { wg.Add(1) go func(clusterId string, docs []*index.Document, respChan chan respVal) { - count, err := indexerClients[clusterId].IndexDocument(docs) + iReq := &index.BulkIndexRequest{ + Documents: docs, + } + iRes, err := indexerClients[clusterId].BulkIndex(iReq) wg.Done() respChan <- respVal{ clusterId: clusterId, - count: count, + res: iRes, err: err, } }(clusterId, docs, respChan) @@ -780,8 +752,8 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe // summarize responses totalCount := 0 for r := range respChan { - if r.count >= 0 { - totalCount += r.count + if r.res.Count >= 0 { + totalCount += int(r.res.Count) } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) @@ -789,14 +761,12 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe } // response - resp := &distribute.IndexDocumentResponse{ + return &distribute.BulkIndexResponse{ Count: int32(totalCount), - } - - return stream.SendAndClose(resp) + }, nil } -func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocumentServer) error { +func (s *GRPCService) BulkDelete(ctx context.Context, req *distribute.BulkDeleteRequest) (*distribute.BulkDeleteResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -806,25 +776,9 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument sort.Strings(clusterIds) } - 
ids := make([]string, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - ids = append(ids, req.Id) - } - type respVal struct { clusterId string - count int + res *index.BulkDeleteResponse err error } @@ -836,14 +790,17 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument wg.Add(1) go func(clusterId string, client *indexer.GRPCClient, ids []string, respChan chan respVal) { // index documents - count, err := client.DeleteDocument(ids) + iReq := &index.BulkDeleteRequest{ + Ids: ids, + } + iRes, err := client.BulkDelete(iReq) wg.Done() respChan <- respVal{ clusterId: clusterId, - count: count, + res: iRes, err: err, } - }(clusterId, client, ids, respChan) + }(clusterId, client, req.Ids, respChan) } wg.Wait() @@ -851,17 +808,167 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument close(respChan) // summarize responses - totalCount := len(ids) + totalCount := 0 for r := range respChan { + if r.res.Count >= 0 { + totalCount += int(r.res.Count) + } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - // response - resp := &distribute.DeleteDocumentResponse{ + return &distribute.BulkDeleteResponse{ Count: int32(totalCount), + }, nil +} + +func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { + start := time.Now() + + resp := &distribute.SearchResponse{} + + indexerClients := s.getIndexerClients() + + // cluster id list sorted by cluster id + clusterIds := make([]string, 0) + for clusterId := range indexerClients { + clusterIds = append(clusterIds, clusterId) + sort.Strings(clusterIds) + } + + type respVal struct { + clusterId string + searchResult *bleve.SearchResult + err error } - return stream.SendAndClose(resp) + // create response channel + 
respChan := make(chan respVal, len(clusterIds)) + + // create search request + ins, err := protobuf.MarshalAny(req.SearchRequest) + if err != nil { + s.logger.Error(err.Error()) + return resp, err + } + searchRequest := ins.(*bleve.SearchRequest) + + // change to distributed search request + from := searchRequest.From + size := searchRequest.Size + searchRequest.From = 0 + searchRequest.Size = from + size + + wg := &sync.WaitGroup{} + for clusterId, client := range indexerClients { + wg.Add(1) + go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { + searchRequestAny := &any.Any{} + err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + respChan <- respVal{ + clusterId: clusterId, + searchResult: nil, + err: err, + } + return + } + + iReq := &index.SearchRequest{ + SearchRequest: searchRequestAny, + } + + iRes, err := client.Search(iReq) + + searchResult, err := protobuf.MarshalAny(iRes.SearchResult) + if err != nil { + respChan <- respVal{ + clusterId: clusterId, + searchResult: nil, + err: err, + } + return + } + + wg.Done() + respChan <- respVal{ + clusterId: clusterId, + searchResult: searchResult.(*bleve.SearchResult), + err: err, + } + }(clusterId, client, searchRequest, respChan) + } + wg.Wait() + + // close response channel + close(respChan) + + // revert to original search request + searchRequest.From = from + searchRequest.Size = size + + // summarize responses + var searchResult *bleve.SearchResult + for r := range respChan { + if r.searchResult != nil { + if searchResult == nil { + searchResult = r.searchResult + } else { + searchResult.Merge(r.searchResult) + } + } + if r.err != nil { + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) + } + } + + // handle case where no results were successful + if searchResult == nil { + searchResult = &bleve.SearchResult{ + Status: &bleve.SearchStatus{ + Errors: make(map[string]error), + }, + } + } + + // sort 
all hits with the requested order + if len(searchRequest.Sort) > 0 { + sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) + sort.Sort(sorter) + } + + // now skip over the correct From + if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { + searchResult.Hits = searchResult.Hits[searchRequest.From:] + } else if searchRequest.From > 0 { + searchResult.Hits = search.DocumentMatchCollection{} + } + + // now trim to the correct size + if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { + searchResult.Hits = searchResult.Hits[0:searchRequest.Size] + } + + // fix up facets + for name, fr := range searchRequest.Facets { + searchResult.Facets.Fixup(name, fr.Size) + } + + // fix up original request + searchResult.Request = searchRequest + searchDuration := time.Since(start) + searchResult.Took = searchDuration + + searchResultAny := &any.Any{} + err = protobuf.UnmarshalAny(searchResult, searchResultAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, err + } + + // response + resp.SearchResult = searchResultAny + + return resp, nil } diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index dec3163..3e2ec1b 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -15,20 +15,11 @@ package dispatcher import ( - "bufio" - "encoding/json" - "io" - "io/ioutil" "net/http" - "strings" "time" - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/proto" "github.com/gorilla/mux" - "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -37,43 +28,23 @@ import ( type Router struct { mux.Router - GRPCClient *GRPCClient - logger *zap.Logger + logger *zap.Logger } -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if 
err != nil { - return nil, err - } - +func NewRouter(logger *zap.Logger) (*Router, error) { router := &Router{ - GRPCClient: grpcClient, - logger: logger, + logger: logger, } router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") router.Handle("/metrics", promhttp.Handler()).Methods("GET") return router, nil } func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - return nil } @@ -106,536 +77,3 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) } - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - doc, err := h.client.GetDocument(vars["id"]) - if err != nil { - switch err { - case errors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = 
blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = index.MarshalDocument(doc) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]*index.Document, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bulk := func(values []string) bool { - for _, value := range values { - if strings.ToLower(value) == "true" { - return true - } - } - return false - }(r.URL.Query()["bulk"]) - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - if bulk { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - var doc *index.Document - err = proto.Unmarshal(bodyBytes, doc) - //doc, err 
:= indexutils.NewDocumentFromBytes(docBytes) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } - } else { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } else { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - 
docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } 
-} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docId, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId == "" { - ids = append(ids, docId) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if docId == "" { - ids = append(ids, docId) - } - } - } else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - 
"message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } -} - -type SearchHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - 
status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/dispatcher/server.go b/dispatcher/server.go index 447c51a..529401e 100644 --- a/dispatcher/server.go +++ b/dispatcher/server.go @@ -22,6 +22,7 @@ import ( type Server struct { managerGrpcAddress string grpcAddress string + grpcGatewayAddress string httpAddress string logger *zap.Logger grpcLogger *zap.Logger @@ -29,14 +30,16 @@ type Server struct { grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } -func NewServer(managerGrpcAddress string, grpcAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, grpcAddress string, grpcGatewayAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ managerGrpcAddress: managerGrpcAddress, grpcAddress: grpcAddress, + grpcGatewayAddress: grpcGatewayAddress, httpAddress: httpAddress, logger: logger, grpcLogger: grpcLogger, @@ -61,8 +64,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.grpcGatewayAddress, s.grpcAddress, s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = NewRouter(s.grpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -95,6 +105,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server 
s.logger.Info("start HTTP server") go func() { @@ -109,11 +125,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 28ed3f4..dd727d7 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,13 +22,13 @@ import ( "testing" "time" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/testutils" ) @@ -42,6 +42,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress1 := "" managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId1 := "manager1" managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -53,8 +54,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - HttpAddress: managerHttpAddress1, + GrpcAddress: managerGrpcAddress1, + GrpcGatewayAddress: managerGrpcGatewayAddress1, + HttpAddress: managerHttpAddress1, }, } @@ -84,6 +86,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress2 := managerGrpcAddress1 managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) 
managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId2 := "manager2" managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -95,8 +98,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - HttpAddress: managerHttpAddress2, + GrpcAddress: managerGrpcAddress2, + GrpcGatewayAddress: managerGrpcGatewayAddress2, + HttpAddress: managerHttpAddress2, }, } @@ -126,6 +130,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress3 := managerGrpcAddress1 managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId3 := "manager3" managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -137,8 +142,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - HttpAddress: managerHttpAddress3, + GrpcAddress: managerGrpcAddress3, + GrpcGatewayAddress: managerGrpcGatewayAddress3, + HttpAddress: managerHttpAddress3, }, } @@ -175,7 +181,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - managerCluster1, err := managerClient1.ClusterInfo() + resClusterInfo, err := managerClient1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -186,8 +192,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - HttpAddress: managerHttpAddress1, + GrpcAddress: managerGrpcAddress1, + GrpcGatewayAddress: managerGrpcGatewayAddress1, + HttpAddress: managerHttpAddress1, }, }, managerNodeId2: { @@ -195,8 +202,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress2, 
State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - HttpAddress: managerHttpAddress2, + GrpcAddress: managerGrpcAddress2, + GrpcGatewayAddress: managerGrpcGatewayAddress2, + HttpAddress: managerHttpAddress2, }, }, managerNodeId3: { @@ -204,13 +212,14 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - HttpAddress: managerHttpAddress3, + GrpcAddress: managerGrpcAddress3, + GrpcGatewayAddress: managerGrpcGatewayAddress3, + HttpAddress: managerHttpAddress3, }, }, }, } - actManagerCluster1 := managerCluster1 + actManagerCluster1 := resClusterInfo.Cluster if !reflect.DeepEqual(expManagerCluster1, actManagerCluster1) { t.Fatalf("expected content to see %v, saw %v", expManagerCluster1, actManagerCluster1) } @@ -219,9 +228,10 @@ func TestServer_Start(t *testing.T) { // indexer cluster1 // indexerManagerGrpcAddress1 := managerGrpcAddress1 - indexerShardId1 := "shard-1" + indexerShardId1 := "shard1" indexerPeerGrpcAddress1 := "" indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId1 := "indexer1" indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -236,8 +246,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - HttpAddress: indexerHttpAddress1, + GrpcAddress: indexerGrpcAddress1, + GrpcGatewayAddress: indexerGrpcGatewayAddress1, + HttpAddress: indexerHttpAddress1, }, } indexerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -259,9 +270,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress2 := 
managerGrpcAddress1 - indexerShardId2 := "shard-1" + indexerShardId2 := "shard1" indexerPeerGrpcAddress2 := "" indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId2 := "indexer2" indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -276,8 +288,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - HttpAddress: indexerHttpAddress2, + GrpcAddress: indexerGrpcAddress2, + GrpcGatewayAddress: indexerGrpcGatewayAddress2, + HttpAddress: indexerHttpAddress2, }, } indexerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -299,9 +312,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress3 := managerGrpcAddress1 - indexerShardId3 := "shard-1" + indexerShardId3 := "shard1" indexerPeerGrpcAddress3 := "" indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId3 := "indexer3" indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -316,8 +330,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - HttpAddress: indexerHttpAddress3, + GrpcAddress: indexerGrpcAddress3, + GrpcGatewayAddress: indexerGrpcGatewayAddress3, + HttpAddress: indexerHttpAddress3, }, } indexerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -347,7 +362,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster1, err := 
indexerClient1.ClusterInfo() + resClusterInfoIndexer1, err := indexerClient1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -358,8 +373,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - HttpAddress: indexerHttpAddress1, + GrpcAddress: indexerGrpcAddress1, + GrpcGatewayAddress: indexerGrpcGatewayAddress1, + HttpAddress: indexerHttpAddress1, }, }, indexerNodeId2: { @@ -367,8 +383,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - HttpAddress: indexerHttpAddress2, + GrpcAddress: indexerGrpcAddress2, + GrpcGatewayAddress: indexerGrpcGatewayAddress2, + HttpAddress: indexerHttpAddress2, }, }, indexerNodeId3: { @@ -376,14 +393,15 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - HttpAddress: indexerHttpAddress3, + GrpcAddress: indexerGrpcAddress3, + GrpcGatewayAddress: indexerGrpcGatewayAddress3, + HttpAddress: indexerHttpAddress3, }, }, }, } - actIndexerCluster1 := indexerCluster1 - if !reflect.DeepEqual(expIndexerCluster1, actIndexerCluster1) { + actIndexerCluster1 := resClusterInfoIndexer1.Cluster + if !cmp.Equal(expIndexerCluster1, actIndexerCluster1) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) } @@ -391,9 +409,10 @@ func TestServer_Start(t *testing.T) { // indexer cluster2 // indexerManagerGrpcAddress4 := managerGrpcAddress1 - indexerShardId4 := "shard-2" + indexerShardId4 := "shard2" indexerPeerGrpcAddress4 := "" indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId4 := "indexer4" 
indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -408,8 +427,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress4, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - HttpAddress: indexerHttpAddress4, + GrpcAddress: indexerGrpcAddress4, + GrpcGatewayAddress: indexerGrpcGatewayAddress4, + HttpAddress: indexerHttpAddress4, }, } indexerIndexMapping4, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -431,9 +451,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress5 := managerGrpcAddress1 - indexerShardId5 := "shard-2" + indexerShardId5 := "shard2" indexerPeerGrpcAddress5 := "" indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId5 := "indexer5" indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -448,8 +469,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress5, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - HttpAddress: indexerHttpAddress5, + GrpcAddress: indexerGrpcAddress5, + GrpcGatewayAddress: indexerGrpcGatewayAddress5, + HttpAddress: indexerHttpAddress5, }, } indexerIndexMapping5, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -471,9 +493,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress6 := managerGrpcAddress1 - indexerShardId6 := "shard-2" + indexerShardId6 := "shard2" indexerPeerGrpcAddress6 := "" indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId6 := "indexer6" indexerBindAddress6 := 
fmt.Sprintf(":%d", testutils.TmpPort()) @@ -488,8 +511,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress6, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - HttpAddress: indexerHttpAddress6, + GrpcAddress: indexerGrpcAddress6, + GrpcGatewayAddress: indexerGrpcGatewayAddress6, + HttpAddress: indexerHttpAddress6, }, } indexerIndexMapping6, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -519,7 +543,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster2, err := indexerClient2.ClusterInfo() + indexerCluster2, err := indexerClient2.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -530,8 +554,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress4, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - HttpAddress: indexerHttpAddress4, + GrpcAddress: indexerGrpcAddress4, + GrpcGatewayAddress: indexerGrpcGatewayAddress4, + HttpAddress: indexerHttpAddress4, }, }, indexerNodeId5: { @@ -539,8 +564,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress5, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - HttpAddress: indexerHttpAddress5, + GrpcAddress: indexerGrpcAddress5, + GrpcGatewayAddress: indexerGrpcGatewayAddress5, + HttpAddress: indexerHttpAddress5, }, }, indexerNodeId6: { @@ -548,13 +574,14 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress6, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - HttpAddress: indexerHttpAddress6, + GrpcAddress: indexerGrpcAddress6, + GrpcGatewayAddress: indexerGrpcGatewayAddress6, + HttpAddress: indexerHttpAddress6, }, }, }, } - actIndexerCluster2 := indexerCluster2 + actIndexerCluster2 := indexerCluster2.Cluster if !reflect.DeepEqual(expIndexerCluster2, 
actIndexerCluster2) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } @@ -564,9 +591,10 @@ func TestServer_Start(t *testing.T) { // dispatcherManagerGrpcAddress := managerGrpcAddress1 dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dispatcherGrpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) + dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherGrpcGatewayAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) defer func() { dispatcher1.Stop() }() diff --git a/example/geo_search_request.json b/example/geo_search_request.json index 40baa91..2883245 100644 --- a/example/geo_search_request.json +++ b/example/geo_search_request.json @@ -1,26 +1,28 @@ { - "query": { - "location": { - "lon": -122.107799, - "lat": 37.399285 - }, - "distance": "100mi", - "field": "geo" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - { - "by": "geo_distance", - "field": "geo", - "unit": "mi", + "search_request": { + "query": { "location": { "lon": -122.107799, "lat": 37.399285 + }, + "distance": "100mi", + "field": "geo" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + { + "by": "geo_distance", + "field": "geo", + "unit": "mi", + "location": { + "lon": -122.107799, + "lat": 37.399285 + } } - } - ] + ] + } } diff --git a/example/wiki_bulk_delete.txt b/example/wiki_bulk_delete.txt index 6f7ddd9..8928994 100644 --- a/example/wiki_bulk_delete.txt +++ b/example/wiki_bulk_delete.txt @@ -1,4 +1,36 @@ arwiki_1 bgwiki_1 cawiki_1 +cswiki_1 +dawiki_1 +dewiki_1 +elwiki_1 +enwiki_1 +eswiki_1 +fawiki_1 +fiwiki_1 +frwiki_1 +gawiki_1 +glwiki_1 
+guwiki_1 +hiwiki_1 +huwiki_1 +hywiki_1 +idwiki_1 +itwiki_1 +jawiki_1 +knwiki_1 +kowiki_1 +mlwiki_1 +nlwiki_1 +nowiki_1 +pswiki_1 +ptwiki_1 +rowiki_1 +ruwiki_1 +svwiki_1 +tawiki_1 +tewiki_1 +thwiki_1 +trwiki_1 zhwiki_1 diff --git a/example/wiki_search_request.json b/example/wiki_search_request.json index c189f9f..3566d99 100644 --- a/example/wiki_search_request.json +++ b/example/wiki_search_request.json @@ -1,44 +1,46 @@ { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "facets": { - "Type count": { - "size": 10, - "field": "_type" + "search_request": { + "query": { + "query": "+_all:search" }, - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z", - "end": "2010-12-31T23:59:59Z" - }, - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z" - } + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score", + "_id", + "-timestamp" + ], + "facets": { + "Type count": { + "size": 10, + "field": "_type" + }, + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ + { + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z", + "end": "2010-12-31T23:59:59Z" + }, + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z" + } + ] + } + }, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" ] } - }, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] } } diff --git a/example/wiki_search_request_prefix.json b/example/wiki_search_request_prefix.json index adb5f92..0de0b37 100644 --- a/example/wiki_search_request_prefix.json +++ b/example/wiki_search_request_prefix.json @@ -1,14 +1,16 @@ { - "query": { + "search_request": { + "query": { "prefix": "searc", "field": "title_en" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - 
"sort": [ - "-_score" - ] + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } } diff --git a/example/wiki_search_request_simple.json b/example/wiki_search_request_simple.json index e4cac4d..39a3e93 100644 --- a/example/wiki_search_request_simple.json +++ b/example/wiki_search_request_simple.json @@ -1,13 +1,15 @@ { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score" - ] + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } } diff --git a/go.mod b/go.mod index 9edf824..99b2de5 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/mosuka/blast go 1.12 require ( + cloud.google.com/go v0.43.0 // indirect github.com/blevesearch/bleve v0.7.0 github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect @@ -17,17 +18,20 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.1.1 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 + github.com/golang/protobuf v1.3.2 + github.com/google/go-cmp v0.3.1 + github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect github.com/gorilla/mux v1.7.0 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/golang-lru v0.5.1 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.9.5 + github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 github.com/ikawaha/kagome.ipadic v1.0.1 // indirect github.com/imdario/mergo v0.3.7 github.com/jmhodges/levigo v1.0.0 // indirect + github.com/kr/pty v1.1.8 // indirect 
github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 @@ -37,6 +41,7 @@ require ( github.com/prometheus/common v0.2.0 // indirect github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect + github.com/rogpeppe/fastuuid v1.2.0 // indirect github.com/stretchr/objx v0.1.1 github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect @@ -45,9 +50,14 @@ require ( go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.10.0 - golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect - google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect - google.golang.org/grpc v1.19.1 + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect + golang.org/x/mobile v0.0.0-20190806162312-597adff16ade // indirect + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect + golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect + golang.org/x/tools v0.0.0-20190808195139-e713427fea3f // indirect + google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 + google.golang.org/grpc v1.22.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.2.2 + honnef.co/go/tools v0.0.1-2019.2.2 // indirect ) diff --git a/go.sum b/go.sum index 91ed1f7..c57d73c 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= github.com/AndreasBriese/bbloom 
v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= @@ -40,6 +44,7 @@ github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpm github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= @@ -64,6 +69,8 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= 
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= @@ -76,14 +83,29 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= @@ -92,6 +114,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmo github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -105,8 +129,9 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y= github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= @@ -121,12 +146,20 @@ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 h1:/ylv98AIMI8XzkeqJGmJSTc/zRQrNllmYWW5b2MoyD4= github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69/go.mod h1:H6ZQv8h8j98nwnF25XLGalSOLhFRjFQ2GGNZRNkkw8Y= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= @@ -170,6 +203,9 @@ github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 h1:gAuD3LIrjkoOO github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ= 
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -197,6 +233,8 @@ github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -206,53 +244,126 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 h1:XCbwcsP09zrBt1aYht0fASw+ynbEpYr8NnCkIN9nMM0= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed 
h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d h1:XB2jc5XQ9uhizGTS2vWcN01bc4dI6z3C4KY5MQm8SS8= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index 0d9fb3d..38ace62 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -16,18 +16,11 @@ package indexer import ( "context" - "errors" "math" - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type GRPCClient struct { @@ -96,246 +89,62 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) 
NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &index.NodeHealthCheckRequest{} - - switch probe { - case index.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = index.NodeHealthCheckRequest_HEALTHINESS - case index.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = index.NodeHealthCheckRequest_LIVENESS - case index.NodeHealthCheckRequest_READINESS.String(): - req.Probe = index.NodeHealthCheckRequest_READINESS - default: - req.Probe = index.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return index.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *index.NodeHealthCheckRequest, opts ...grpc.CallOption) (*index.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*index.Node, error) { - resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Node, nil +func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.NodeInfoResponse, error) { + return c.client.NodeInfo(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterJoin(node *index.Node, opts ...grpc.CallOption) error { - req := &index.ClusterJoinRequest{ - Node: node, - } - - _, err := c.client.ClusterJoin(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterJoin(req *index.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterJoin(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { - req := &index.ClusterLeaveRequest{ - Id: id, - } - - _, err := c.client.ClusterLeave(c.ctx, req, opts...) 
- if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterLeave(req *index.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterLeave(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*index.Cluster, error) { - resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Cluster, nil +func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.ClusterInfoResponse, error) { + return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) } -func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { - req := &empty.Empty{} - - watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { + return c.client.ClusterWatch(c.ctx, req, opts...) } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { - req := &index.GetDocumentRequest{ - Id: id, - } - - resp, err := c.client.GetDocument(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - return resp.Document, nil +func (c *GRPCClient) Get(req *index.GetRequest, opts ...grpc.CallOption) (*index.GetResponse, error) { + return c.client.Get(c.ctx, req, opts...) 
} -func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { - // bleve.SearchRequest -> Any - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return nil, err - } - - req := &index.SearchRequest{ - SearchRequest: searchRequestAny, - } - - resp, err := c.client.Search(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - // Any -> bleve.SearchResult - searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - if searchResultInstance == nil { - return nil, errors.New("nil") - } - searchResult := searchResultInstance.(*bleve.SearchResult) - - return searchResult, nil +func (c *GRPCClient) Index(req *index.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Index(c.ctx, req, opts...) } -func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.IndexDocument(c.ctx, opts...) - if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, doc := range docs { - req := &index.IndexDocumentRequest{ - Document: doc, - //Id: id, - //Fields: fieldsAny, - } - - err = stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) Delete(req *index.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Delete(c.ctx, req, opts...) } -func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.DeleteDocument(c.ctx, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, id := range ids { - req := &index.DeleteDocumentRequest{ - Id: id, - } - - err := stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) BulkIndex(req *index.BulkIndexRequest, opts ...grpc.CallOption) (*index.BulkIndexResponse, error) { + return c.client.BulkIndex(c.ctx, req, opts...) } -func (c *GRPCClient) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexMapping, err := protobuf.MarshalAny(resp.IndexConfig.IndexMapping) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexConfig := map[string]interface{}{ - "index_mapping": indexMapping, - "index_type": resp.IndexConfig.IndexType, - "index_storage_type": resp.IndexConfig.IndexStorageType, - } - - return indexConfig, nil +func (c *GRPCClient) BulkDelete(req *index.BulkDeleteRequest, opts ...grpc.CallOption) (*index.BulkDeleteResponse, error) { + return c.client.BulkDelete(c.ctx, req, opts...) } -func (c *GRPCClient) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) 
- if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexStatsIntr, err := protobuf.MarshalAny(resp.IndexStats) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - indexStats := *indexStatsIntr.(*map[string]interface{}) - - return indexStats, nil +func (c *GRPCClient) Search(req *index.SearchRequest, opts ...grpc.CallOption) (*index.SearchResponse, error) { + return c.client.Search(c.ctx, req, opts...) } -func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) GetIndexConfig(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexConfigResponse, error) { + return c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) +} - return errors.New(st.Message()) - } +func (c *GRPCClient) GetIndexStats(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexStatsResponse, error) { + return c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) +} - return nil +func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Snapshot(c.ctx, &empty.Empty{}) } diff --git a/indexer/grpc_gateway.go b/indexer/grpc_gateway.go new file mode 100644 index 0000000..3a1fafa --- /dev/null +++ b/indexer/grpc_gateway.go @@ -0,0 +1,376 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package indexer + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *index.GetResponse: + value, err := protobuf.MarshalAny(v.(*index.GetResponse).Fields) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "fields": value, + }, + ) + case *index.SearchResponse: + value, err := protobuf.MarshalAny(v.(*index.SearchResponse).SearchResult) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "search_result": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". 
+func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + switch v.(type) { + case *index.SearchRequest: + m := map[string]interface{}{} + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + searchRequestMap, ok := m["search_request"] + if !ok { + return errors.New("search_request does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + searchRequest := bleve.NewSearchRequest(nil) + err = json.Unmarshal(searchRequestBytes, searchRequest) + if err != nil { + return err + } + v.(*index.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) + default: + return json.Unmarshal(data, v) + } +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.IndexRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + id, ok := tmpValue["id"].(string) + if ok { + v.(*index.IndexRequest).Id = id + } + + fields, ok := tmpValue["fields"] + if !ok { + return errors.New("value does not exist") + } + v.(*index.IndexRequest).Fields = &any.Any{} + return protobuf.UnmarshalAny(fields, v.(*index.IndexRequest).Fields) + case *index.SearchRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + var searchRequest *bleve.SearchRequest + err = json.Unmarshal(searchRequestBytes, &searchRequest) + if err != nil { + return err + } + 
v.(*index.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type JsonlMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonlMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". 
+func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.BulkIndexRequest: + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + v.(*index.BulkIndexRequest).Documents = docs + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonlMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type TextMarshaler struct{} + +// ContentType always Returns "application/json". +func (*TextMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads text stream from "r". 
+func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.BulkDeleteRequest: + ids := make([]string, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + //idBytes, err := reader.ReadBytes('\n') + idBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + break + } + } + + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + } + v.(*index.BulkDeleteRequest).Ids = ids + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *TextMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), + runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := index.RegisterIndexHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != 
nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 8f6598e..63b8d78 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -157,13 +157,14 @@ func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster return nil, err } - managers, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { s.logger.Error(err.Error()) return nil, err } - return managers, nil + return res.Cluster, nil } func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { @@ -203,7 +204,8 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } // create stream for watching cluster changes - stream, err := client.ClusterWatch() + req := &empty.Empty{} + stream, err := client.ClusterWatch(req) if err != nil { s.logger.Error(err.Error()) continue @@ -509,8 +511,17 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - s.logger.Info("update shards", zap.Any("shards", snapshotClusterMap)) - err = client.Set(fmt.Sprintf("cluster/shards/%s", s.shardId), snapshotClusterMap) + valueAny := &any.Any{} + err = protobuf.UnmarshalAny(snapshotClusterMap, valueAny) + if err != nil { + s.logger.Error(err.Error()) + continue + } + req := &management.SetRequest{ + Key: fmt.Sprintf("cluster/shards/%s", s.shardId), 
+ Value: valueAny, + } + _, err = client.Set(req) if err != nil { s.logger.Error(err.Error()) continue @@ -548,12 +559,18 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *index.NodeHealth resp := &index.NodeHealthCheckResponse{} switch req.Probe { + case index.NodeHealthCheckRequest_UNKNOWN: + fallthrough case index.NodeHealthCheckRequest_HEALTHINESS: resp.State = index.NodeHealthCheckResponse_HEALTHY case index.NodeHealthCheckRequest_LIVENESS: resp.State = index.NodeHealthCheckResponse_ALIVE case index.NodeHealthCheckRequest_READINESS: resp.State = index.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil @@ -589,7 +606,8 @@ func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { return nil, err } - node, err := s.peerClients[id].NodeInfo() + req := &empty.Empty{} + resp, err := s.peerClients[id].NodeInfo(req) if err != nil { s.logger.Debug(err.Error(), zap.String("id", id)) return &index.Node{ @@ -602,7 +620,7 @@ func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { }, nil } - return node, nil + return resp.Node, nil } func (s *GRPCService) getNode(id string) (*index.Node, error) { @@ -641,7 +659,12 @@ func (s *GRPCService) setNode(node *index.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(node) + + req := &index.ClusterJoinRequest{ + Node: node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Error(err.Error()) return err @@ -677,7 +700,12 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.ClusterLeave(id) + + req := &index.ClusterLeaveRequest{ + Id: id, + } + + _, err = client.ClusterLeave(req) if err != nil { s.logger.Error(err.Error()) return err @@ -758,10 +786,10 @@ func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterW return nil } -func (s 
*GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentRequest) (*index.GetDocumentResponse, error) { - resp := &index.GetDocumentResponse{} +func (s *GRPCService) Get(ctx context.Context, req *index.GetRequest) (*index.GetResponse, error) { + resp := &index.GetResponse{} - fields, err := s.raftServer.GetDocument(req.Id) + fields, err := s.raftServer.Get(req.Id) if err != nil { switch err { case blasterrors.ErrNotFound: @@ -773,148 +801,155 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentReq } } - docMap := map[string]interface{}{ - "id": req.Id, - "fields": fields, - } - - docBytes, err := json.Marshal(docMap) + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(fields, fieldsAny) if err != nil { s.logger.Error(err.Error(), zap.String("id", req.Id)) return resp, status.Error(codes.Internal, err.Error()) } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Document = doc + resp.Fields = fieldsAny return resp, nil } -func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { - resp := &index.SearchResponse{} - - searchRequest, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } +func (s *GRPCService) Index(ctx context.Context, req *index.IndexRequest) (*empty.Empty, error) { + resp := &empty.Empty{} - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) + // index + 
var err error + if s.raftServer.IsLeader() { + err = s.raftServer.Index(&index.Document{Id: req.Id, Fields: req.Fields}) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + } else { + // forward to leader + client, err := s.getLeaderClient() + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + resp, err = client.Index(req) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } } - resp.SearchResult = searchResultAny - return resp, nil } -func (s *GRPCService) IndexDocument(stream index.Index_IndexDocumentServer) error { - docs := make([]*index.Document, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - docs = append(docs, req.Document) - } +func (s *GRPCService) Delete(ctx context.Context, req *index.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} - // index - count := -1 + // delete var err error if s.raftServer.IsLeader() { - count, err = s.raftServer.IndexDocument(docs) + err = s.raftServer.Delete(req.Id) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - count, err = client.IndexDocument(docs) + resp, err = client.Delete(req) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } } - return stream.SendAndClose( - &index.IndexDocumentResponse{ - Count: int32(count), - }, - ) + return resp, nil } -func (s 
*GRPCService) DeleteDocument(stream index.Index_DeleteDocumentServer) error { - ids := make([]string, 0) +func (s *GRPCService) BulkIndex(ctx context.Context, req *index.BulkIndexRequest) (*index.BulkIndexResponse, error) { + resp := &index.BulkIndexResponse{} - for { - req, err := stream.Recv() + if s.raftServer.IsLeader() { + count, err := s.raftServer.BulkIndex(req.Documents) if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + resp.Count = -1 + return resp, status.Error(codes.Internal, err.Error()) + } + resp.Count = int32(count) + } else { + // forward to leader + client, err := s.getLeaderClient() + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + resp, err = client.BulkIndex(req) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - - ids = append(ids, req.Id) } - // delete - count := -1 - var err error + return resp, nil +} + +func (s *GRPCService) BulkDelete(ctx context.Context, req *index.BulkDeleteRequest) (*index.BulkDeleteResponse, error) { + resp := &index.BulkDeleteResponse{} + if s.raftServer.IsLeader() { - count, err = s.raftServer.DeleteDocument(ids) + count, err := s.raftServer.BulkDelete(req.Ids) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + resp.Count = -1 + return resp, status.Error(codes.Internal, err.Error()) } + resp.Count = int32(count) } else { // forward to leader client, err := s.getLeaderClient() if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - count, err = client.DeleteDocument(ids) + resp, err := client.BulkDelete(req) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, 
err.Error()) } } - return stream.SendAndClose( - &index.DeleteDocumentResponse{ - Count: int32(count), - }, - ) + return resp, nil +} + +func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { + resp := &index.SearchResponse{} + + searchRequest, err := protobuf.MarshalAny(req.SearchRequest) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) + } + + searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResultAny := &any.Any{} + err = protobuf.UnmarshalAny(searchResult, searchResultAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.SearchResult = searchResultAny + + return resp, nil } func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 6f7688a..6a7353f 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -15,19 +15,11 @@ package indexer import ( - "bufio" - "encoding/json" - "io" - "io/ioutil" "net/http" - "strings" "time" - "github.com/blevesearch/bleve" "github.com/gorilla/mux" - blasterrors "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -36,43 +28,23 @@ import ( type Router struct { mux.Router - GRPCClient *GRPCClient - logger *zap.Logger + logger *zap.Logger } -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if err != nil { - return nil, err - } - +func NewRouter(logger *zap.Logger) (*Router, error) { router := &Router{ - GRPCClient: 
grpcClient, - logger: logger, + logger: logger, } router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") router.Handle("/metrics", promhttp.Handler()).Methods("GET") return router, nil } func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - return nil } @@ -105,539 +77,3 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) } - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - id := vars["id"] - - doc, err := h.client.GetDocument(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, 
status, h.logger) - return - } - - content, err = index.MarshalDocument(doc) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]*index.Document, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bulk := func(values []string) bool { - for _, value := range values { - if strings.ToLower(value) == "true" { - return true - } - } - return false - }(r.URL.Query()["bulk"]) - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - if bulk { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": 
status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } - } else { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } else { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) - if err != nil { - status = http.StatusBadRequest - - 
msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer 
blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docId, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId != "" { - ids = append(ids, docId) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if docId != "" { - ids = append(ids, docId) - } - } - } else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - 
blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type SearchHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": 
err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/indexer/raft_command.go b/indexer/raft_command.go deleted file mode 100644 index 3cab8f0..0000000 --- a/indexer/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - indexDocument - deleteDocument -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 95590fc..da53222 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -128,7 +128,7 @@ func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (f *RaftFSM) IndexDocument(doc *index.Document) error { +func (f *RaftFSM) Index(doc *index.Document) error { err := f.index.Index(doc) if err != nil { f.logger.Error(err.Error()) @@ -138,7 +138,7 @@ func (f *RaftFSM) IndexDocument(doc *index.Document) error { return nil } -func (f *RaftFSM) IndexDocuments(docs []*index.Document) (int, error) { +func (f *RaftFSM) BulkIndex(docs []*index.Document) (int, error) { count, err := f.index.BulkIndex(docs) if err != nil { f.logger.Error(err.Error()) @@ -148,7 +148,7 @@ func (f *RaftFSM) IndexDocuments(docs []*index.Document) (int, error) { return count, nil } -func (f *RaftFSM) DeleteDocument(id string) error { +func (f *RaftFSM) Delete(id string) error { err := f.index.Delete(id) if err != nil { f.logger.Error(err.Error()) @@ -158,7 +158,7 @@ func (f *RaftFSM) DeleteDocument(id string) error { return nil } -func (f *RaftFSM) DeleteDocuments(ids []string) (int, error) { +func (f *RaftFSM) BulkDelete(ids []string) (int, error) { count, err := f.index.BulkDelete(ids) if err != nil { f.logger.Error(err.Error()) @@ -190,75 +190,67 @@ type fsmResponse struct { error error } -type fsmIndexDocumentResponse struct { +type fsmBulkIndexResponse struct { count int error error } -type fsmDeleteDocumentResponse 
struct { +type fsmBulkDeleteResponse struct { count int error error } func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) + proposal := &index.Proposal{} + err := proto.Unmarshal(l.Data, proposal) if err != nil { + f.logger.Error(err.Error()) return err } - switch msg.Command { - case setNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + switch proposal.Event { + case index.Proposal_SET_NODE: + err = f.SetNode(proposal.Node) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - b, err := json.Marshal(data["node"]) + return &fsmResponse{error: nil} + case index.Proposal_DELETE_NODE: + err = f.DeleteNode(proposal.Node.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - var node *index.Node - err = json.Unmarshal(b, &node) + return &fsmResponse{error: nil} + case index.Proposal_INDEX: + err := f.Index(proposal.Document) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNode(node) + return &fsmResponse{error: nil} + case index.Proposal_DELETE: + err := f.Delete(proposal.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteNode(data["id"].(string)) - return &fsmResponse{error: err} - case indexDocument: - var data []*index.Document - err := json.Unmarshal(msg.Data, &data) + return &fsmResponse{error: nil} + case index.Proposal_BULK_INDEX: + count, err := f.BulkIndex(proposal.Documents) if err != nil { f.logger.Error(err.Error()) - return &fsmIndexDocumentResponse{count: -1, error: err} + return &fsmBulkIndexResponse{count: count, error: err} } - count, err := f.IndexDocuments(data) - return &fsmIndexDocumentResponse{count: count, 
error: err} - case deleteDocument: - var data []string - err := json.Unmarshal(msg.Data, &data) + return &fsmBulkIndexResponse{count: count, error: nil} + case index.Proposal_BULK_DELETE: + count, err := f.BulkDelete(proposal.Ids) if err != nil { f.logger.Error(err.Error()) - return &fsmDeleteDocumentResponse{count: -1, error: err} + return &fsmBulkDeleteResponse{count: count, error: err} } - count, err := f.DeleteDocuments(data) - return &fsmDeleteDocumentResponse{count: count, error: err} + return &fsmBulkDeleteResponse{count: count, error: nil} default: err = errors.New("unsupported command") f.logger.Error(err.Error()) diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 0903e67..39ea9a8 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -15,7 +15,6 @@ package indexer import ( - "encoding/json" "errors" "io/ioutil" "net" @@ -24,6 +23,9 @@ import ( "time" "github.com/blevesearch/bleve" + + "github.com/golang/protobuf/proto" + "github.com/blevesearch/bleve/mapping" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" @@ -45,8 +47,9 @@ type RaftServer struct { bootstrap bool logger *zap.Logger - raft *raft.Raft - fsm *RaftFSM + transport *raft.NetworkTransport + raft *raft.Raft + fsm *RaftFSM } func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { @@ -97,7 +100,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) + s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err @@ -185,7 +188,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create Raft machine") - s.raft, err = 
raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, transport) + s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) if err != nil { s.logger.Fatal(err.Error()) return err @@ -197,7 +200,7 @@ func (s *RaftServer) Start() error { Servers: []raft.Server{ { ID: raftConfig.LocalID, - Address: transport.LocalAddr(), + Address: s.transport.LocalAddr(), }, }, } @@ -287,6 +290,10 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", blasterrors.ErrNotFoundLeader } +func (s *RaftServer) NodeAddress() string { + return string(s.transport.LocalAddr()) +} + func (s *RaftServer) NodeID() string { return s.node.Id } @@ -324,24 +331,17 @@ func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { } func (s *RaftServer) setNode(node *index.Node) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node": node, - }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err + proposal := &index.Proposal{ + Event: index.Proposal_SET_NODE, + Node: node, } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.Any("node", node)) @@ -357,24 +357,19 @@ func (s *RaftServer) setNode(node *index.Node) error { } func (s *RaftServer) deleteNode(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "id": nodeId, + proposal := &index.Proposal{ + Event: index.Proposal_DELETE_NODE, + Node: &index.Node{ + Id: nodeId, }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != 
nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.String("id", nodeId)) @@ -526,7 +521,7 @@ func (s *RaftServer) Snapshot() error { return nil } -func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { +func (s *RaftServer) Get(id string) (map[string]interface{}, error) { fields, err := s.fsm.GetDocument(id) if err != nil { switch err { @@ -541,86 +536,138 @@ func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := s.fsm.Search(request) +func (s *RaftServer) Index(doc *index.Document) error { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + return raft.ErrNotLeader + } + + proposal := &index.Proposal{ + Event: index.Proposal_INDEX, + Document: doc, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) - return nil, err + return err } - return result, nil + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() + if err != nil { + s.logger.Error(err.Error()) + return err + } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } + + return nil } -func (s *RaftServer) IndexDocument(docs []*index.Document) (int, error) { +func (s *RaftServer) Delete(id string) error { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader + return raft.ErrNotLeader } - msg, err := newMessage( - indexDocument, - docs, - ) + proposal := &index.Proposal{ + Event: index.Proposal_DELETE, + Id: id, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { 
s.logger.Error(err.Error()) - return -1, err + return err + } + + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() + if err != nil { + s.logger.Error(err.Error()) + return err + } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } + + return nil +} + +func (s *RaftServer) BulkIndex(docs []*index.Document) (int, error) { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + return -1, raft.ErrNotLeader } - msgBytes, err := json.Marshal(msg) + proposal := &index.Proposal{ + Event: index.Proposal_BULK_INDEX, + Documents: docs, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return -1, err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) return -1, err } - err = f.Response().(*fsmIndexDocumentResponse).error + err = f.Response().(*fsmBulkIndexResponse).error if err != nil { s.logger.Error(err.Error()) return -1, err } - return f.Response().(*fsmIndexDocumentResponse).count, nil + return f.Response().(*fsmBulkIndexResponse).count, nil } -func (s *RaftServer) DeleteDocument(ids []string) (int, error) { +func (s *RaftServer) BulkDelete(ids []string) (int, error) { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader } - msg, err := newMessage( - deleteDocument, - ids, - ) + proposal := &index.Proposal{ + Event: index.Proposal_BULK_DELETE, + Ids: ids, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return -1, err } - msgBytes, err := json.Marshal(msg) + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() if err != nil { s.logger.Error(err.Error()) return -1, err } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() + err = 
f.Response().(*fsmBulkDeleteResponse).error if err != nil { s.logger.Error(err.Error()) return -1, err } - err = f.Response().(*fsmDeleteDocumentResponse).error + + return f.Response().(*fsmBulkDeleteResponse).count, nil +} + +func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { + result, err := s.fsm.Search(request) if err != nil { s.logger.Error(err.Error()) - return -1, err + return nil, err } - return f.Response().(*fsmDeleteDocumentResponse).count, nil + return result, nil } func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { diff --git a/indexer/server.go b/indexer/server.go index d6b8bc2..dbea38b 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -18,13 +18,16 @@ import ( "encoding/json" "fmt" + accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/protobuf/management" - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/errors" + "github.com/blevesearch/bleve/mapping" + "github.com/golang/protobuf/ptypes/empty" + blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) @@ -46,6 +49,7 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } @@ -86,20 +90,29 @@ func (s *Server) Start() { return } - clusterIntr, err := mc.Get(fmt.Sprintf("cluster/shards/%s", s.shardId)) - if err != nil && err != errors.ErrNotFound { + req := &management.GetRequest{ + Key: fmt.Sprintf("cluster/shards/%s", s.shardId), + } + res, err := mc.Get(req) + if err != nil && err != blasterrors.ErrNotFound { + s.logger.Fatal(err.Error()) + return + } + value, err := protobuf.MarshalAny(res.Value) + if err != nil { s.logger.Fatal(err.Error()) return } - if clusterIntr != nil { - b, err := 
json.Marshal(clusterIntr) + if value != nil { + nodes := *value.(*map[string]interface{}) + nodesBytes, err := json.Marshal(nodes) if err != nil { s.logger.Fatal(err.Error()) return } var cluster *index.Cluster - err = json.Unmarshal(b, &cluster) + err = json.Unmarshal(nodesBytes, &cluster) if err != nil { s.logger.Fatal(err.Error()) return @@ -134,31 +147,38 @@ func (s *Server) Start() { return } s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - value, err := mc.Get("/index_config") + req := &management.GetRequest{ + Key: "/index_config", + } + resp, err := mc.Get(req) if err != nil { s.logger.Fatal(err.Error()) return } - indexMappingSrc, ok := (*value.(*map[string]interface{}))["index_mapping"] - if ok { - b, err := json.Marshal(indexMappingSrc) - if err != nil { - s.logger.Fatal(err.Error()) - return + value, err := protobuf.MarshalAny(resp.Value) + if value != nil { + indexConfigMap := *value.(*map[string]interface{}) + indexMappingSrc, ok := indexConfigMap["index_mapping"].(map[string]interface{}) + if ok { + indexMappingBytes, err := json.Marshal(indexMappingSrc) + if err != nil { + s.logger.Fatal(err.Error()) + return + } + s.indexMapping, err = indexutils.NewIndexMappingFromBytes(indexMappingBytes) + if err != nil { + s.logger.Fatal(err.Error()) + return + } } - s.indexMapping, err = indexutils.NewIndexMappingFromBytes(b) - if err != nil { - s.logger.Fatal(err.Error()) - return + indexTypeSrc, ok := indexConfigMap["index_type"] + if ok { + s.indexType = indexTypeSrc.(string) + } + indexStorageTypeSrc, ok := indexConfigMap["index_storage_type"] + if ok { + s.indexStorageType = indexStorageTypeSrc.(string) } - } - indexTypeSrc, ok := (*value.(*map[string]interface{}))["index_type"] - if ok { - s.indexType = indexTypeSrc.(string) - } - indexStorageTypeSrc, ok := (*value.(*map[string]interface{}))["index_storage_type"] - if ok { - s.indexStorageType = indexStorageTypeSrc.(string) } } else if s.peerGrpcAddress != "" { 
pc, err := NewGRPCClient(s.peerGrpcAddress) @@ -176,15 +196,17 @@ func (s *Server) Start() { } s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - value, err := pc.GetIndexConfig() + req := &empty.Empty{} + res, err := pc.GetIndexConfig(req) if err != nil { s.logger.Fatal(err.Error()) return } - s.indexMapping = value["index_mapping"].(*mapping.IndexMappingImpl) - s.indexType = value["index_type"].(string) - s.indexStorageType = value["index_storage_type"].(string) + indexMapping, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) + s.indexMapping = indexMapping.(*mapping.IndexMappingImpl) + s.indexType = res.IndexConfig.IndexType + s.indexStorageType = res.IndexConfig.IndexStorageType } // bootstrap node? @@ -214,8 +236,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -256,6 +285,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server s.logger.Info("start HTTP server") go func() { @@ -276,7 +311,11 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.node) + req := &index.ClusterJoinRequest{ + Node: s.node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Fatal(err.Error()) return @@ -291,11 +330,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + 
s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { diff --git a/indexer/server_test.go b/indexer/server_test.go index dd2f7b1..7563ed3 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -26,12 +26,16 @@ import ( "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/errors" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestServer_Start(t *testing.T) { @@ -45,6 +49,7 @@ func TestServer_Start(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -59,8 +64,9 @@ func TestServer_Start(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -97,6 +103,7 @@ func TestServer_LivenessProbe(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -111,8 +118,9 @@ func TestServer_LivenessProbe(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - 
HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -152,36 +160,39 @@ func TestServer_LivenessProbe(t *testing.T) { } // healthiness - healthiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + reqHealthiness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + resHealthiness, err := client.NodeHealthCheck(reqHealthiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness := healthiness - if expHealthiness != actHealthiness { - t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) + expHealthinessState := index.NodeHealthCheckResponse_HEALTHY + actHealthinessState := resHealthiness.State + if expHealthinessState != actHealthinessState { + t.Fatalf("expected content to see %v, saw %v", expHealthinessState, actHealthinessState) } // liveness - liveness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + reqLiveness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + resLiveness, err := client.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) + expLivenessState := index.NodeHealthCheckResponse_ALIVE + actLivenessState := resLiveness.State + if expLivenessState != actLivenessState { + t.Fatalf("expected content to see %v, saw %v", expLivenessState, actLivenessState) } // readiness - readiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + reqReadiness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + resReadiness, err := client.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - 
expReadiness := index.NodeHealthCheckResponse_READY.String() - actReadiness := readiness - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) + expReadinessState := index.NodeHealthCheckResponse_READY + actReadinessState := resReadiness.State + if expReadinessState != actReadinessState { + t.Fatalf("expected content to see %v, saw %v", expReadinessState, actReadinessState) } } @@ -196,6 +207,7 @@ func TestServer_GetNode(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -210,8 +222,9 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -251,7 +264,8 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo() + req := &empty.Empty{} + res, err := client.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -260,11 +274,12 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } - actNodeInfo := nodeInfo + actNodeInfo := res.Node if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } @@ -281,6 +296,7 @@ func TestServer_GetCluster(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", 
testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -295,8 +311,9 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -336,7 +353,8 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -347,13 +365,14 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, }, }, } - actCluster := cluster + actCluster := res.Cluster if !reflect.DeepEqual(expCluster, actCluster) { t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } @@ -370,6 +389,7 @@ func TestServer_GetIndexMapping(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -384,8 +404,9 @@ func TestServer_GetIndexMapping(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -426,15 +447,17 @@ func TestServer_GetIndexMapping(t *testing.T) { expIndexMapping := indexMapping - 
actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexMapping := actIndexConfigMap["index_mapping"].(*mapping.IndexMappingImpl) + im, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) if err != nil { t.Fatalf("%v", err) } + actIndexMapping := im.(*mapping.IndexMappingImpl) exp, err := json.Marshal(expIndexMapping) if err != nil { @@ -461,6 +484,7 @@ func TestServer_GetIndexType(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -475,8 +499,9 @@ func TestServer_GetIndexType(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -517,12 +542,13 @@ func TestServer_GetIndexType(t *testing.T) { expIndexType := indexType - actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexType := actIndexConfigMap["index_type"].(string) + actIndexType := res.IndexConfig.IndexType if !reflect.DeepEqual(expIndexType, actIndexType) { t.Fatalf("expected content to see %v, saw %v", expIndexType, actIndexType) @@ -540,6 +566,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", 
testutils.TmpPort()) @@ -554,8 +581,9 @@ func TestServer_GetIndexStorageType(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -596,12 +624,13 @@ func TestServer_GetIndexStorageType(t *testing.T) { expIndexStorageType := indexStorageType - actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexStorageType := actIndexConfigMap["index_storage_type"].(string) + actIndexStorageType := res.IndexConfig.IndexStorageType if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) @@ -619,6 +648,7 @@ func TestServer_GetIndexStats(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -633,8 +663,9 @@ func TestServer_GetIndexStats(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -689,17 +720,24 @@ func TestServer_GetIndexStats(t *testing.T) { "searches": float64(0), } - actIndexStats, err := client.GetIndexStats() + req := &empty.Empty{} + res, err := client.GetIndexStats(req) if err != nil { t.Fatalf("%v", err) } + is, err := protobuf.MarshalAny(res.IndexStats) + if err != nil { + t.Fatalf("%v", err) + } + actIndexStats := *is.(*map[string]interface{}) + if 
!reflect.DeepEqual(expIndexStats, actIndexStats) { t.Fatalf("expected content to see %v, saw %v", expIndexStats, actIndexStats) } } -func TestServer_PutDocument(t *testing.T) { +func TestServer_Index(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -710,6 +748,7 @@ func TestServer_PutDocument(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -724,8 +763,9 @@ func TestServer_PutDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -764,10 +804,8 @@ func TestServer_PutDocument(t *testing.T) { t.Fatalf("%v", err) } - // put document - docs := make([]*index.Document, 0) + // index document docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) @@ -784,21 +822,17 @@ func TestServer_PutDocument(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - docs = append(docs, doc1) - count, err := client.IndexDocument(docs) + req := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(req) if err != nil { t.Fatalf("%v", err) } - - expCount := 1 - actCount := count - - if expCount != actCount { - t.Fatalf("expected content to see %v, saw %v", expCount, actCount) - } } -func TestServer_GetDocument(t *testing.T) { +func TestServer_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -809,6 +843,7 @@ func TestServer_GetDocument(t 
*testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -823,8 +858,9 @@ func TestServer_GetDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -863,61 +899,69 @@ func TestServer_GetDocument(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - // get document - getDoc1, err := client.GetDocument("enwiki_1") + getReq := &index.GetRequest{Id: 
"enwiki_1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } - expGetDoc1, _ := index.MarshalDocument(putDoc1) - actGetDoc1, _ := index.MarshalDocument(getDoc1) - if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { - t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", err) + } + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { + t.Fatalf("%v", err) + } + if !cmp.Equal(expFields, actFields) { + t.Fatalf("expected content to see %v, saw %v", expFields, actFields) } // get non-existing document - getDocFields2, err := client.GetDocument("doc2") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) + getReq2 := &index.GetRequest{Id: "non-existing"} + getRes2, err := client.Get(getReq2) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } - if getDocFields2 != nil { - t.Fatalf("expected content to see nil, saw %v", getDocFields2) + if getRes2 != nil { + t.Fatalf("expected content to see nil, saw %v", getRes2) } } -func TestServer_DeleteDocument(t *testing.T) { +func TestServer_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -928,6 +972,7 @@ func TestServer_DeleteDocument(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -942,8 +987,9 @@ func TestServer_DeleteDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + 
HttpAddress: httpAddress, }, } @@ -982,86 +1028,84 @@ func TestServer_DeleteDocument(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - // get document - getDoc1, err := client.GetDocument("enwiki_1") + getReq := &index.GetRequest{Id: "enwiki_1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } - expGetDoc1, _ := index.MarshalDocument(putDoc1) - actGetDoc1, _ := index.MarshalDocument(getDoc1) - if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { - t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", err) } - - // get non-existing document - getDoc2, err := client.GetDocument("non-existing") - if err != errors.ErrNotFound { + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { 
t.Fatalf("%v", err) } - if getDoc2 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc2) + if !cmp.Equal(expFields, actFields) { + t.Fatalf("expected content to see %v, saw %v", expFields, actFields) } // delete document - delCount, err := client.DeleteDocument([]string{"enwiki_1"}) + deleteReq := &index.DeleteRequest{Id: "enwiki_1"} + _, err = client.Delete(deleteReq) if err != nil { t.Fatalf("%v", err) } - expDelCount := 1 - actDelCount := delCount - if expDelCount != actDelCount { - t.Fatalf("expected content to see %v, saw %v", expDelCount, actDelCount) - } - // get document - getDoc1, err = client.GetDocument("enwiki_1") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) + // get document again + getRes, err = client.Get(getReq) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } - if getDoc1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc1) + if getRes != nil { + t.Fatalf("expected content to see nil, saw %v", getRes) } // delete non-existing document - getDoc1, err = client.GetDocument("non-existing") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) - } - if getDoc1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc1) + deleteReq2 := &index.DeleteRequest{Id: "non-existing"} + _, err = client.Delete(deleteReq2) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } } @@ -1076,6 +1120,7 @@ func TestServer_Search(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1090,8 +1135,9 @@ func TestServer_Search(t *testing.T) { BindAddress: bindAddress, 
State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -1130,42 +1176,53 @@ func TestServer_Search(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) + // get document + getReq := &index.GetRequest{Id: "enwiki_1"} + getRes, err := client.Get(getReq) + if err != nil { + t.Fatalf("%v", err) + } + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", err) + } + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { + t.Fatalf("%v", err) + } + if !cmp.Equal(expFields, actFields) { + t.Fatalf("expected content to see %v, saw %v", expFields, actFields) } // search searchRequestPath := filepath.Join(curDir, 
"../example/wiki_search_request.json") - searchRequestFile, err := os.Open(searchRequestPath) if err != nil { t.Fatalf("%v", err) @@ -1173,24 +1230,27 @@ func TestServer_Search(t *testing.T) { defer func() { _ = searchRequestFile.Close() }() - searchRequestByte, err := ioutil.ReadAll(searchRequestFile) if err != nil { t.Fatalf("%v", err) } - searchRequest := bleve.NewSearchRequest(nil) - err = json.Unmarshal(searchRequestByte, searchRequest) + searchReq := &index.SearchRequest{} + marshaler := JsonMarshaler{} + err = marshaler.Unmarshal(searchRequestByte, searchReq) if err != nil { t.Fatalf("%v", err) } - - searchResult1, err := client.Search(searchRequest) + searchRes, err := client.Search(searchReq) + if err != nil { + t.Fatalf("%v", err) + } + searchResult, err := protobuf.MarshalAny(searchRes.SearchResult) if err != nil { t.Fatalf("%v", err) } expTotal := uint64(1) - actTotal := searchResult1.Total + actTotal := searchResult.(*bleve.SearchResult).Total if expTotal != actTotal { t.Fatalf("expected content to see %v, saw %v", expTotal, actTotal) } @@ -1207,6 +1267,7 @@ func TestCluster_Start(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1221,8 +1282,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1248,6 +1310,7 @@ func TestCluster_Start(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) 
httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1262,8 +1325,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1289,6 +1353,7 @@ func TestCluster_Start(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1303,8 +1368,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1330,7 +1396,7 @@ func TestCluster_Start(t *testing.T) { time.Sleep(5 * time.Second) } -func TestCluster_LivenessProbe(t *testing.T) { +func TestCluster_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1341,6 +1407,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1355,8 +1422,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - 
GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1382,6 +1450,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1396,8 +1465,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1423,6 +1493,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1437,8 +1508,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1486,101 +1558,105 @@ func TestCluster_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } + healthinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + livenessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + readinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + // healthiness 
- healthiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes1, err := client1.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness1 := healthiness1 + expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness1 := healthinessRes1.State if expHealthiness1 != actHealthiness1 { t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } // liveness - liveness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes1, err := client1.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness1 := liveness1 + expLiveness1 := index.NodeHealthCheckResponse_ALIVE + actLiveness1 := livenessRes1.State if expLiveness1 != actLiveness1 { t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // readiness - readiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes1, err := client1.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := index.NodeHealthCheckResponse_READY.String() - actReadiness1 := readiness1 + expReadiness1 := index.NodeHealthCheckResponse_READY + actReadiness1 := readinessRes1.State if expReadiness1 != actReadiness1 { t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } // healthiness - healthiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes2, err := client2.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness2 := healthiness2 + expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness2 := healthinessRes2.State if expHealthiness2 != actHealthiness2 { 
t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } // liveness - liveness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes2, err := client2.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness2 := liveness2 + expLiveness2 := index.NodeHealthCheckResponse_ALIVE + actLiveness2 := livenessRes2.State if expLiveness2 != actLiveness2 { t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // readiness - readiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes2, err := client2.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := index.NodeHealthCheckResponse_READY.String() - actReadiness2 := readiness2 + expReadiness2 := index.NodeHealthCheckResponse_READY + actReadiness2 := readinessRes2.State if expReadiness2 != actReadiness2 { t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // healthiness - healthiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes3, err := client3.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness3 := healthiness3 + expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness3 := healthinessRes3.State if expHealthiness3 != actHealthiness3 { t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } // liveness - liveness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes3, err := client3.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness3 := liveness3 + expLiveness3 := index.NodeHealthCheckResponse_ALIVE + actLiveness3 := 
livenessRes3.State if expLiveness3 != actLiveness3 { t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } // readiness - readiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes3, err := client3.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := index.NodeHealthCheckResponse_READY.String() - actReadiness3 := readiness3 + expReadiness3 := index.NodeHealthCheckResponse_READY + actReadiness3 := readinessRes3.State if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } @@ -1597,6 +1673,7 @@ func TestCluster_GetNode(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1611,8 +1688,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1638,6 +1716,7 @@ func TestCluster_GetNode(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1652,8 +1731,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: 
grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1679,6 +1759,7 @@ func TestCluster_GetNode(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1693,8 +1774,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1743,7 +1825,7 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo() + node11, err := client1.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1752,16 +1834,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } - actNode11 := node11 + actNode11 := node11.Node if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node21, err := client2.NodeInfo() + node21, err := client2.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1770,16 +1853,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } - actNode21 := node21 + actNode21 := node21.Node if !reflect.DeepEqual(expNode21, actNode21) { 
t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node31, err := client3.NodeInfo() + node31, err := client3.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1788,11 +1872,12 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } - actNode31 := node31 + actNode31 := node31.Node if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } @@ -1809,6 +1894,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1823,8 +1909,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1850,6 +1937,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1864,8 +1952,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: 
grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1891,6 +1980,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1905,8 +1995,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1955,7 +2046,7 @@ func TestCluster_GetCluster(t *testing.T) { } // get cluster info from manager1 - cluster1, err := client1.ClusterInfo() + cluster1, err := client1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1966,8 +2057,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1975,8 +2067,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1984,18 +2077,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, 
}, }, } - actCluster1 := cluster1 + actCluster1 := cluster1.Cluster if !reflect.DeepEqual(expCluster1, actCluster1) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.ClusterInfo() + cluster2, err := client2.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -2006,8 +2100,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -2015,8 +2110,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -2024,18 +2120,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster2 := cluster2 + actCluster2 := cluster2.Cluster if !reflect.DeepEqual(expCluster2, actCluster2) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.ClusterInfo() + cluster3, err := client3.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -2046,8 +2143,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -2055,8 +2153,9 @@ func TestCluster_GetCluster(t *testing.T) { 
BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -2064,13 +2163,14 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster3 := cluster3 + actCluster3 := cluster3.Cluster if !reflect.DeepEqual(expCluster3, actCluster3) { t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } diff --git a/manager/grpc_client.go b/manager/grpc_client.go index 6935724..4d732a4 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -16,13 +16,9 @@ package manager import ( "context" - "errors" "math" - "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -95,180 +91,66 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &management.NodeHealthCheckRequest{} - - switch probe { - case management.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = management.NodeHealthCheckRequest_HEALTHINESS - case management.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = management.NodeHealthCheckRequest_LIVENESS - case management.NodeHealthCheckRequest_READINESS.String(): - req.Probe = management.NodeHealthCheckRequest_READINESS - default: - req.Probe = management.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return management.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *management.NodeHealthCheckRequest, opts ...grpc.CallOption) (*management.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*management.Node, error) { - resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Node, nil +func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.NodeInfoResponse, error) { + return c.client.NodeInfo(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterJoin(node *management.Node, opts ...grpc.CallOption) error { - req := &management.ClusterJoinRequest{ - Node: node, - } - - _, err := c.client.ClusterJoin(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterJoin(req *management.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterJoin(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { - req := &management.ClusterLeaveRequest{ - Id: id, - } - - _, err := c.client.ClusterLeave(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterLeave(req *management.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterLeave(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*management.Cluster, error) { - resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Cluster, nil +func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.ClusterInfoResponse, error) { + return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) } -func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { - req := &empty.Empty{} - - watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { + return c.client.ClusterWatch(c.ctx, req, opts...) } -func (c *GRPCClient) Get(key string, opts ...grpc.CallOption) (interface{}, error) { - req := &management.GetRequest{ - Key: key, - } - - resp, err := c.client.Get(c.ctx, req, opts...) +func (c *GRPCClient) Get(req *management.GetRequest, opts ...grpc.CallOption) (*management.GetResponse, error) { + res, err := c.client.Get(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - switch st.Code() { case codes.NotFound: - return nil, blasterrors.ErrNotFound + return &management.GetResponse{}, nil default: - return nil, errors.New(st.Message()) + return nil, err } } - - value, err := protobuf.MarshalAny(resp.Value) - - return value, nil + return res, nil } -func (c *GRPCClient) Set(key string, value interface{}, opts ...grpc.CallOption) error { - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &management.SetRequest{ - Key: key, - Value: valueAny, - } - - _, err = c.client.Set(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil +func (c *GRPCClient) Set(req *management.SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Set(c.ctx, req, opts...) } -func (c *GRPCClient) Delete(key string, opts ...grpc.CallOption) error { - req := &management.DeleteRequest{ - Key: key, - } - - _, err := c.client.Delete(c.ctx, req, opts...) +func (c *GRPCClient) Delete(req *management.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + res, err := c.client.Delete(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - switch st.Code() { case codes.NotFound: - return blasterrors.ErrNotFound + return &empty.Empty{}, nil default: - return errors.New(st.Message()) + return nil, err } } - - return nil + return res, nil } -func (c *GRPCClient) Watch(key string, opts ...grpc.CallOption) (management.Management_WatchClient, error) { - req := &management.WatchRequest{ - Key: key, - } - - watchClient, err := c.client.Watch(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) Watch(req *management.WatchRequest, opts ...grpc.CallOption) (management.Management_WatchClient, error) { + return c.client.Watch(c.ctx, req, opts...) 
} -func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return errors.New(st.Message()) - } - - return nil +func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Snapshot(c.ctx, &empty.Empty{}) } diff --git a/manager/grpc_gateway.go b/manager/grpc_gateway.go new file mode 100644 index 0000000..3f505d4 --- /dev/null +++ b/manager/grpc_gateway.go @@ -0,0 +1,172 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". 
+func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *management.GetResponse: + value, err := protobuf.MarshalAny(v.(*management.GetResponse).Value) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "value": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *management.SetRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + value, ok := tmpValue["value"] + if !ok { + return errors.New("value does not exist") + } + v.(*management.SetRequest).Value = &any.Any{} + return protobuf.UnmarshalAny(value, v.(*management.SetRequest).Value) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := management.RegisterManagementHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/manager/grpc_server.go b/manager/grpc_server.go index 453e240..8d17486 100644 --- a/manager/grpc_server.go +++ b/manager/grpc_server.go @@ -40,22 +40,26 @@ type GRPCServer struct { func NewGRPCServer(grpcAddr string, service management.ManagementServer, logger *zap.Logger) (*GRPCServer, error) { server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - 
grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - //grpc_recovery.UnaryServerInterceptor(), - )), + grpc.StreamInterceptor( + grpc_middleware.ChainStreamServer( + //grpc_ctxtags.StreamServerInterceptor(), + //grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_zap.StreamServerInterceptor(logger), + //grpc_auth.StreamServerInterceptor(myAuthFunction), + //grpc_recovery.StreamServerInterceptor(), + ), + ), + grpc.UnaryInterceptor( + grpc_middleware.ChainUnaryServer( + //grpc_ctxtags.UnaryServerInterceptor(), + //grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_zap.UnaryServerInterceptor(logger), + //grpc_auth.UnaryServerInterceptor(myAuthFunction), + //grpc_recovery.UnaryServerInterceptor(), + ), + ), ) management.RegisterManagementServer(server, service) diff --git a/manager/grpc_service.go b/manager/grpc_service.go index c0745ae..c79f7ad 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -79,16 +79,28 @@ func (s *GRPCService) Stop() error { } func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { + //leaderId, err := s.raftServer.LeaderID(10 * time.Second) + //if err != nil { + // return nil, err + //} + //client, exist := s.peerClients[string(leaderId)] + //if !exist { + // err := errors.New("there is no client for leader") + // s.logger.Error(err.Error()) + // return nil, err + //} + //return client, nil + for id, node := range s.cluster.Nodes { switch node.State { case management.Node_LEADER: - if client, exist := 
s.peerClients[id]; exist { - return client, nil - } + } + if client, exist := s.peerClients[id]; exist { + return client, nil } } - err := errors.New("there is no leader") + err := errors.New("there is no client for leader") s.logger.Error(err.Error()) return nil, err } @@ -284,12 +296,18 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *management.NodeH resp := &management.NodeHealthCheckResponse{} switch req.Probe { + case management.NodeHealthCheckRequest_UNKNOWN: + fallthrough case management.NodeHealthCheckRequest_HEALTHINESS: resp.State = management.NodeHealthCheckResponse_HEALTHY case management.NodeHealthCheckRequest_LIVENESS: resp.State = management.NodeHealthCheckResponse_ALIVE case management.NodeHealthCheckRequest_READINESS: resp.State = management.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil @@ -325,7 +343,8 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { return nil, err } - node, err := s.peerClients[id].NodeInfo() + req := &empty.Empty{} + resp, err := s.peerClients[id].NodeInfo(req) if err != nil { s.logger.Debug(err.Error(), zap.String("id", id)) return &management.Node{ @@ -338,7 +357,7 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { }, nil } - return node, nil + return resp.Node, nil } func (s *GRPCService) getNode(id string) (*management.Node, error) { @@ -377,7 +396,12 @@ func (s *GRPCService) setNode(node *management.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(node) + + req := &management.ClusterJoinRequest{ + Node: node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Error(err.Error()) return err @@ -413,7 +437,12 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.ClusterLeave(id) + + req := 
&management.ClusterLeaveRequest{ + Id: id, + } + + _, err = client.ClusterLeave(req) if err != nil { s.logger.Error(err.Error()) return err @@ -534,13 +563,13 @@ func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*emp resp := &empty.Empty{} - value, err := protobuf.MarshalAny(req.Value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - if s.raftServer.IsLeader() { + value, err := protobuf.MarshalAny(req.Value) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + err = s.raftServer.SetValue(req.Key, value) if err != nil { s.logger.Error(err.Error()) @@ -558,7 +587,7 @@ func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*emp s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - err = client.Set(req.Key, value) + resp, err = client.Set(req) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -604,7 +633,7 @@ func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - err = client.Delete(req.Key) + resp, err = client.Delete(req) if err != nil { switch err { case blasterrors.ErrNotFound: diff --git a/manager/http_handler.go b/manager/http_handler.go new file mode 100644 index 0000000..0ceb447 --- /dev/null +++ b/manager/http_handler.go @@ -0,0 +1,79 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "net/http" + "time" + + "github.com/gorilla/mux" + blasthttp "github.com/mosuka/blast/http" + "github.com/mosuka/blast/version" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" +) + +type Router struct { + mux.Router + + logger *zap.Logger +} + +func NewRouter(logger *zap.Logger) (*Router, error) { + router := &Router{ + logger: logger, + } + + router.StrictSlash(true) + + router.Handle("/", NewRootHandler(logger)).Methods("GET") + router.Handle("/metrics", promhttp.Handler()).Methods("GET") + + return router, nil +} + +func (r *Router) Close() error { + return nil +} + +type RootHandler struct { + logger *zap.Logger +} + +func NewRootHandler(logger *zap.Logger) *RootHandler { + return &RootHandler{ + logger: logger, + } +} + +func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + start := time.Now() + status := http.StatusOK + content := make([]byte, 0) + + defer blasthttp.RecordMetrics(start, status, w, r) + + msgMap := map[string]interface{}{ + "version": version.Version, + "status": status, + } + + content, err := blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) +} diff --git a/manager/http_router.go b/manager/http_router.go deleted file mode 100644 index be7ca13..0000000 --- a/manager/http_router.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/gorilla/mux" - blasterrors "github.com/mosuka/blast/errors" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - GRPCClient *GRPCClient - logger *zap.Logger -} - -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if err != nil { - return nil, err - } - - router := &Router{ - GRPCClient: grpcClient, - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/configs", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/configs/{path:.*}", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs/{path:.*}", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs/{path:.*}", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - - return nil -} - -type 
RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - value, err := h.client.Get(key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // interface{} -> []byte - content, err = json.MarshalIndent(value, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, 
h.logger) -} - -type PutHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewPutHandler(client *GRPCClient, logger *zap.Logger) *PutHandler { - return &PutHandler{ - client: client, - logger: logger, - } -} - -func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // string -> map[string]interface{} - var value interface{} - err = json.Unmarshal(bodyBytes, &value) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - err = h.client.Set(key, value) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - 
start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - err := h.client.Delete(key) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/manager/http_server.go b/manager/http_server.go index f64b7c6..33bd0fc 100644 --- a/manager/http_server.go +++ b/manager/http_server.go @@ -15,7 +15,6 @@ package manager import ( - "fmt" "net" "net/http" @@ -68,18 +67,3 @@ func (s *HTTPServer) Stop() error { return nil } - -func (s *HTTPServer) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/raft_command.go b/manager/raft_command.go deleted file mode 100644 index 97fa3df..0000000 --- a/manager/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - setKeyValue - deleteKeyValue -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 325042d..bfd859f 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -21,9 +21,11 @@ import ( "io/ioutil" "sync" + "github.com/gogo/protobuf/proto" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/maputils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" ) @@ -95,6 +97,7 @@ func (f *RaftFSM) DeleteNode(nodeId string) error { } func (f *RaftFSM) GetValue(key string) (interface{}, error) { + // get raw data value, err := f.data.Get(key) if err != nil { switch err { @@ -107,15 +110,7 @@ func (f *RaftFSM) GetValue(key string) (interface{}, error) { } } - var ret interface{} - switch value.(type) { - case maputils.Map: - ret = value.(maputils.Map).ToMap() - default: - ret = value - } - - return ret, nil + return value, nil } func (f *RaftFSM) SetValue(key string, value interface{}, merge bool) error { @@ -157,65 +152,47 @@ type fsmResponse struct { } func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) + proposal := &management.Proposal{} + err := proto.Unmarshal(l.Data, proposal) if err != nil { f.logger.Error(err.Error()) return err } - switch msg.Command { - case setNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + switch proposal.Event { + case management.Proposal_SET_NODE: + err = f.SetNode(proposal.Node) if err != nil { 
f.logger.Error(err.Error()) return &fsmResponse{error: err} } - b, err := json.Marshal(data["node"]) + return &fsmResponse{error: nil} + case management.Proposal_DELETE_NODE: + err = f.DeleteNode(proposal.Node.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - var node *management.Node - err = json.Unmarshal(b, &node) + return &fsmResponse{error: nil} + case management.Proposal_SET_VALUE: + value, err := protobuf.MarshalAny(proposal.KeyValue.Value) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNode(node) + err = f.SetValue(proposal.KeyValue.Key, value, false) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + return &fsmResponse{error: nil} + case management.Proposal_DELETE_VALUE: + err = f.DeleteValue(proposal.KeyValue.Key) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.DeleteNode(data["id"].(string)) - return &fsmResponse{error: err} - case setKeyValue: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetValue(data["key"].(string), data["value"], false) - return &fsmResponse{error: err} - case deleteKeyValue: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteValue(data["key"].(string)) - return &fsmResponse{error: err} + return &fsmResponse{error: nil} default: err = errors.New("unsupported command") f.logger.Error(err.Error()) diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index ca0ad48..86f70ba 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -20,6 +20,7 @@ import ( "reflect" "testing" + 
"github.com/google/go-cmp/cmp" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" ) @@ -360,7 +361,7 @@ func TestRaftFSM_Get(t *testing.T) { expectedValue := 1 actualValue := value - if expectedValue != actualValue { + if !cmp.Equal(expectedValue, actualValue) { t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -395,9 +396,7 @@ func TestRaftFSM_Set(t *testing.T) { } // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, - }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) } @@ -405,36 +404,26 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp1 := map[string]interface{}{ - "a": 1, - } + exp1 := map[string]interface{}{"a": 1} act1 := val1 if !reflect.DeepEqual(exp1, act1) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) } // merge {"a": "A"} - _ = fsm.SetValue("/", map[string]interface{}{ - "a": "A", - }, true) + _ = fsm.SetValue("/", map[string]interface{}{"a": "A"}, true) val2, err := fsm.GetValue("/") if err != nil { t.Fatalf("%v", err) } - exp2 := map[string]interface{}{ - "a": "A", - } + exp2 := map[string]interface{}{"a": "A"} act2 := val2 if !reflect.DeepEqual(exp2, act2) { t.Fatalf("expected content to see %v, saw %v", exp2, act2) } // set {"a": {"b": "AB"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"b": "AB"}}, false) if err != nil { t.Fatalf("%v", err) } @@ -443,22 +432,14 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp3 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - } + exp3 := map[string]interface{}{"a": map[string]interface{}{"b": "AB"}} act3 := val3 if !reflect.DeepEqual(exp3, act3) { t.Fatalf("expected content to see %v, saw %v", exp3, act3) } // merge {"a": 
{"c": "AC"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "c": "AC", - }, - }, true) + err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"c": "AC"}}, true) if err != nil { t.Fatalf("%v", err) } @@ -466,21 +447,14 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp4 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - "c": "AC", - }, - } + exp4 := map[string]interface{}{"a": map[string]interface{}{"b": "AB", "c": "AC"}} act4 := val4 if !reflect.DeepEqual(exp4, act4) { t.Fatalf("expected content to see %v, saw %v", exp4, act4) } // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, - }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) } @@ -488,9 +462,7 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp5 := map[string]interface{}{ - "a": 1, - } + exp5 := map[string]interface{}{"a": 1} act5 := val5 if !reflect.DeepEqual(exp5, act5) { t.Fatalf("expected content to see %v, saw %v", exp5, act5) @@ -546,6 +518,7 @@ func TestRaftFSM_Delete(t *testing.T) { t.Fatalf("%v", err) } + // set {"a": 1} err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) @@ -558,7 +531,7 @@ func TestRaftFSM_Delete(t *testing.T) { expectedValue := 1 actualValue := value - if expectedValue != actualValue { + if !cmp.Equal(expectedValue, actualValue) { t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } diff --git a/manager/raft_server.go b/manager/raft_server.go index 46bc079..c169e31 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -25,11 +25,14 @@ import ( "time" "github.com/blevesearch/bleve/mapping" + "github.com/gogo/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" raftbadgerdb 
"github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" @@ -354,24 +357,17 @@ func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { } func (s *RaftServer) setNode(node *management.Node) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node": node, - }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err + proposal := &management.Proposal{ + Event: management.Proposal_SET_NODE, + Node: node, } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.Any("node", node)) @@ -387,24 +383,19 @@ func (s *RaftServer) setNode(node *management.Node) error { } func (s *RaftServer) deleteNode(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "id": nodeId, + proposal := &management.Proposal{ + Event: management.Proposal_DELETE_NODE, + Node: &management.Node{ + Id: nodeId, }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.String("id", nodeId)) @@ -577,25 +568,27 @@ func (s *RaftServer) SetValue(key string, value interface{}) error { return raft.ErrNotLeader } - 
msg, err := newMessage( - setKeyValue, - map[string]interface{}{ - "key": key, - "value": value, - }, - ) + valueAny := &any.Any{} + err := protobuf.UnmarshalAny(value, valueAny) if err != nil { s.logger.Error(err.Error()) return err } - msgBytes, err := json.Marshal(msg) + proposal := &management.Proposal{ + Event: management.Proposal_SET_VALUE, + KeyValue: &management.KeyValue{ + Key: key, + Value: valueAny, + }, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) @@ -616,24 +609,19 @@ func (s *RaftServer) DeleteValue(key string) error { return raft.ErrNotLeader } - msg, err := newMessage( - deleteKeyValue, - map[string]interface{}{ - "key": key, + proposal := &management.Proposal{ + Event: management.Proposal_DELETE_VALUE, + KeyValue: &management.KeyValue{ + Key: key, }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) diff --git a/manager/server.go b/manager/server.go index 809d3c4..909b4fc 100644 --- a/manager/server.go +++ b/manager/server.go @@ -36,6 +36,7 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } @@ -83,8 +84,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = 
NewRouter(s.node.Metadata.GrpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -93,7 +101,7 @@ func (s *Server) Start() { // create HTTP server s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { - s.logger.Error(err.Error()) + s.logger.Fatal(err.Error()) return } @@ -125,6 +133,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server s.logger.Info("start HTTP server") go func() { @@ -145,7 +159,11 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.node) + req := &management.ClusterJoinRequest{ + Node: s.node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Fatal(err.Error()) return @@ -160,11 +178,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { @@ -198,7 +223,7 @@ func (s *Server) GrpcAddress() string { } func (s *Server) HttpAddress() string { - address, err := s.httpServer.GetAddress() + address, err := s.grpcGateway.GetAddress() if err != nil { return "" } diff --git a/manager/server_test.go b/manager/server_test.go index 855a0f1..0b863c3 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -22,9 +22,12 @@ import ( "testing" "time" - blasterrors "github.com/mosuka/blast/errors" + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" 
"github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" @@ -39,6 +42,7 @@ func TestServer_Start(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -53,8 +57,9 @@ func TestServer_Start(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -92,6 +97,7 @@ func TestServer_HealthCheck(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -106,8 +112,9 @@ func TestServer_HealthCheck(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -150,34 +157,37 @@ func TestServer_HealthCheck(t *testing.T) { } // healthiness - healthiness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + reqHealthiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + resHealthiness, err := client.NodeHealthCheck(reqHealthiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness := healthiness + expHealthiness := management.NodeHealthCheckResponse_HEALTHY + actHealthiness := 
resHealthiness.State if expHealthiness != actHealthiness { t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) } // liveness - liveness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + resLiveness, err := client.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness := liveness + expLiveness := management.NodeHealthCheckResponse_ALIVE + actLiveness := resLiveness.State if expLiveness != actLiveness { t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness - readiness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + resReadiness, err := client.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness := management.NodeHealthCheckResponse_READY.String() - actReadiness := readiness + expReadiness := management.NodeHealthCheckResponse_READY + actReadiness := resReadiness.State if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) } @@ -192,6 +202,7 @@ func TestServer_GetNode(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewawyAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -206,8 +217,9 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewawyAddress, + 
HttpAddress: httpAddress, }, } @@ -250,7 +262,7 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo() + res, err := client.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -259,11 +271,12 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewawyAddress, + HttpAddress: httpAddress, }, } - actNodeInfo := nodeInfo + actNodeInfo := res.Node if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } @@ -278,6 +291,7 @@ func TestServer_GetCluster(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -292,8 +306,9 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -336,7 +351,7 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.ClusterInfo() + res, err := client.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -347,19 +362,20 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, }, }, } - actCluster := cluster + actCluster := res.Cluster if 
!reflect.DeepEqual(expCluster, actCluster) { t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } } -func TestServer_SetState(t *testing.T) { +func TestServer_Set(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -368,6 +384,7 @@ func TestServer_SetState(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -382,8 +399,9 @@ func TestServer_SetState(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -426,27 +444,40 @@ func TestServer_SetState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{ + Key: "test/key1", + } + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(getRes.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } -func TestServer_GetState(t *testing.T) { +func TestServer_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -455,6 +486,7 @@ func TestServer_GetState(t *testing.T) { peerGrpcAddress := "" 
grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -469,8 +501,9 @@ func TestServer_GetState(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -513,27 +546,38 @@ func TestServer_GetState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{Key: "test/key1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(getRes.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } -func TestServer_DeleteState(t *testing.T) { +func TestServer_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -542,6 +586,7 @@ func TestServer_DeleteState(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -556,8 +601,9 @@ func TestServer_DeleteState(t *testing.T) { BindAddress: 
bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -600,43 +646,53 @@ func TestServer_DeleteState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + if err != nil { + t.Fatalf("%v", err) + } + err = protobuf.UnmarshalAny("val1", valueAny) + setReq := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{ + Key: "test/key1", + } + res, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(res.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } // delete value - err = client.Delete("test/key1") - if err != nil { - t.Fatalf("%v", err) + deleteReq := &management.DeleteRequest{ + Key: "test/key1", } - - val1, err = client.Get("test/key1") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - - if val1 != nil { + _, err = client.Delete(deleteReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data - err = client.Delete("test/non-existing") - if err != blasterrors.ErrNotFound { + deleteNonExistingReq := &management.DeleteRequest{ + Key: "test/non-existing", + } + _, err = client.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } } @@ -650,6 +706,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := 
fmt.Sprintf(":%d", testutils.TmpPort()) @@ -664,8 +721,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -692,6 +750,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -706,8 +765,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -734,6 +794,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -748,8 +809,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -787,6 +849,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", 
testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -801,8 +864,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -829,6 +893,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -843,8 +908,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -871,6 +937,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -885,8 +952,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -937,101 +1005,105 @@ func TestCluster_HealthCheck(t *testing.T) { t.Fatalf("%v", err) } + reqHealtiness := 
&management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + // healthiness - healthiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness1, err := client1.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness1 := healthiness1 + expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness1 := resHealthiness1.State if expHealthiness1 != actHealthiness1 { t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } // liveness - liveness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness1, err := client1.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness1 := liveness1 + expLiveness1 := management.NodeHealthCheckResponse_ALIVE + actLiveness1 := resLiveness1.State if expLiveness1 != actLiveness1 { t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // readiness - readiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness1, err := client1.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := management.NodeHealthCheckResponse_READY.String() - actReadiness1 := readiness1 + expReadiness1 := management.NodeHealthCheckResponse_READY + actReadiness1 := resReadiness1.State if expReadiness1 != actReadiness1 { t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } // healthiness - healthiness2, err := 
client2.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness2, err := client2.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness2 := healthiness2 + expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness2 := resHealthiness2.State if expHealthiness2 != actHealthiness2 { t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } // liveness - liveness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness2, err := client2.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness2 := liveness2 + expLiveness2 := management.NodeHealthCheckResponse_ALIVE + actLiveness2 := resLiveness2.State if expLiveness2 != actLiveness2 { t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // readiness - readiness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness2, err := client2.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := management.NodeHealthCheckResponse_READY.String() - actReadiness2 := readiness2 + expReadiness2 := management.NodeHealthCheckResponse_READY + actReadiness2 := resReadiness2.State if expReadiness2 != actReadiness2 { t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // healthiness - healthiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness3, err := client3.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness3 := healthiness3 + expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness3 := resHealthiness3.State if 
expHealthiness3 != actHealthiness3 { t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } // liveness - liveness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness3, err := client3.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness3 := liveness3 + expLiveness3 := management.NodeHealthCheckResponse_ALIVE + actLiveness3 := resLiveness3.State if expLiveness3 != actLiveness3 { t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } // readiness - readiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness3, err := client3.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := management.NodeHealthCheckResponse_READY.String() - actReadiness3 := readiness3 + expReadiness3 := management.NodeHealthCheckResponse_READY + actReadiness3 := resReadiness3.State if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } @@ -1046,6 +1118,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1060,8 +1133,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1088,6 +1162,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) 
+ grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1102,8 +1177,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1130,6 +1206,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1144,8 +1221,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1197,7 +1275,8 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo() + req := &empty.Empty{} + resNodeInfo11, err := client1.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1206,16 +1285,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } - actNode11 := node11 + actNode11 := resNodeInfo11.Node if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node21, 
err := client2.NodeInfo() + resNodeInfo21, err := client2.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1224,16 +1304,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } - actNode21 := node21 + actNode21 := resNodeInfo21.Node if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node31, err := client3.NodeInfo() + resNodeInfo31, err := client3.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1242,11 +1323,12 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } - actNode31 := node31 + actNode31 := resNodeInfo31.Node if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } @@ -1261,6 +1343,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1275,8 +1358,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1303,6 +1387,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress2 
:= grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1317,8 +1402,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1345,6 +1431,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1359,8 +1446,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1412,7 +1500,8 @@ func TestCluster_GetCluster(t *testing.T) { } // get cluster info from manager1 - cluster1, err := client1.ClusterInfo() + req := &empty.Empty{} + resClusterInfo1, err := client1.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1423,8 +1512,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1432,8 +1522,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: 
bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1441,18 +1532,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster1 := cluster1 + actCluster1 := resClusterInfo1.Cluster if !reflect.DeepEqual(expCluster1, actCluster1) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.ClusterInfo() + resClusterInfo2, err := client2.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1463,8 +1555,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1472,8 +1565,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1481,18 +1575,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster2 := cluster2 + actCluster2 := resClusterInfo2.Cluster if 
!reflect.DeepEqual(expCluster2, actCluster2) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.ClusterInfo() + resClusterInfo3, err := client3.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1503,8 +1598,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1512,8 +1608,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1521,19 +1618,20 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster3 := cluster3 + actCluster3 := resClusterInfo3.Cluster if !reflect.DeepEqual(expCluster3, actCluster3) { t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } } -func TestCluster_SetState(t *testing.T) { +func TestCluster_Set(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1542,8 +1640,9 @@ func TestCluster_SetState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := fmt.Sprintf(":%d", 
testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -1556,8 +1655,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1582,10 +1682,14 @@ func TestCluster_SetState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -1598,8 +1702,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1624,10 +1729,14 @@ func TestCluster_SetState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -1640,8 +1749,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: 
grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1692,113 +1802,185 @@ func TestCluster_SetState(t *testing.T) { t.Fatalf("%v", err) } - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := 
&management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: "test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { + if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := 
client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } } -func TestCluster_GetState(t *testing.T) { +func TestCluster_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1807,8 +1989,9 @@ func TestCluster_GetState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -1821,8 +2004,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + 
GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1847,10 +2031,14 @@ func TestCluster_GetState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -1863,8 +2051,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1889,10 +2078,14 @@ func TestCluster_GetState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -1905,8 +2098,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1957,113 +2151,185 @@ func TestCluster_GetState(t *testing.T) { t.Fatalf("%v", err) } - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { 
+ t.Fatalf("%v", err) + } + setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := &management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: 
"test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { + if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", 
expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } } -func TestCluster_DeleteState(t *testing.T) { +func TestCluster_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -2072,8 +2338,9 @@ func TestCluster_DeleteState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -2086,8 +2353,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -2112,10 +2380,14 @@ func TestCluster_DeleteState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + 
grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -2128,8 +2400,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -2154,10 +2427,14 @@ func TestCluster_DeleteState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -2170,8 +2447,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -2222,215 +2500,275 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("%v", err) } - // set test data before delete - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes 
- val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := &management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: "test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { 
+ if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 
:= *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } // delete - err = client1.Delete("test/key1") + deleteReq1 := &management.DeleteRequest{ + Key: "test/key1", + } + _, err = client1.Delete(deleteReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err = client1.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes11, err = client1.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val11 != nil { + if getRes11.Value != nil { t.Fatalf("%v", err) } - val21, err = client2.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes21, err = client2.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val21 != nil { + if getRes21.Value != nil { t.Fatalf("%v", err) } - val31, err = client3.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes31, err = client3.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val31 != nil { + if getRes31.Value != nil { t.Fatalf("%v", err) } - err = client2.Delete("test/key2") + deleteReq2 := &management.DeleteRequest{ + Key: "test/key2", + } + _, err = client2.Delete(deleteReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err = client1.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes12, err = client1.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val12 != nil { + if getRes12.Value != nil { t.Fatalf("%v", 
err) } - val22, err = client2.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes22, err = client2.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val22 != nil { + if getRes22.Value != nil { t.Fatalf("%v", err) } - val32, err = client3.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes32, err = client3.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val32 != nil { + if getRes32.Value != nil { t.Fatalf("%v", err) } - err = client3.Delete("test/key3") + deleteReq3 := &management.DeleteRequest{ + Key: "test/key2", + } + _, err = client3.Delete(deleteReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate - // get value from all nodes - val13, err = client1.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val13 != nil { - t.Fatalf("%v", err) - } - val23, err = client2.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val23 != nil { - t.Fatalf("%v", err) - } - val33, err = client3.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val33 != nil { - t.Fatalf("%v", err) - } - // delete non-existing data from manager1 - err = client1.Delete("test/non-existing") - if err == nil { + deleteNonExistingReq := &management.DeleteRequest{ + Key: "test/non-existing", + } + _, err = client1.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data from manager2 - err = client2.Delete("test/non-existing") - if err == nil { + _, err = client2.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data from manager3 - err = client3.Delete("test/non-existing") - if err == nil { + _, err = client3.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } } diff --git a/maputils/maputils.go b/maputils/maputils.go index 36ffe67..a5922fd 100644 --- a/maputils/maputils.go +++ b/maputils/maputils.go @@ -173,7 
+173,12 @@ func (m Map) Get(key string) (interface{}, error) { } } - return value, nil + switch value.(type) { + case Map: + return value.(Map).ToMap(), nil + default: + return value, nil + } } func (m Map) Delete(key string) error { diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go index 9fb2bcb..d71e400 100644 --- a/maputils/maputils_test.go +++ b/maputils/maputils_test.go @@ -516,8 +516,8 @@ func Test_Get(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp2 := Map{ - "b": Map{ + exp2 := map[string]interface{}{ + "b": map[string]interface{}{ "c": "abc", "d": "abd", }, diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go index d37e934..a942d09 100644 --- a/protobuf/distribute/distribute.pb.go +++ b/protobuf/distribute/distribute.pb.go @@ -8,8 +8,12 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" index "github.com/mosuka/blast/protobuf/index" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +26,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +62,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + 
NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -125,7 +135,7 @@ func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse struct { @@ -164,237 +174,323 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } -type GetDocumentRequest struct { +type GetRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } -func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocumentRequest) ProtoMessage() {} -func (*GetDocumentRequest) Descriptor() ([]byte, []int) { +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{2} } -func (m *GetDocumentRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) } -func (m *GetDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentRequest.Merge(m, src) +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) } -func (m *GetDocumentRequest) XXX_Size() int { - return xxx_messageInfo_GetDocumentRequest.Size(m) +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) } -func (m *GetDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_GetRequest proto.InternalMessageInfo -func (m *GetDocumentRequest) GetId() string { +func (m *GetRequest) GetId() string { if m != nil { return m.Id } return "" } -type GetDocumentResponse struct { - Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type GetResponse struct { + Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } -func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } -func 
(*GetDocumentResponse) ProtoMessage() {} -func (*GetDocumentResponse) Descriptor() ([]byte, []int) { +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{3} } -func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) } -func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) } -func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentResponse.Merge(m, src) +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) } -func (m *GetDocumentResponse) XXX_Size() int { - return xxx_messageInfo_GetDocumentResponse.Size(m) +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) } -func (m *GetDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_GetResponse proto.InternalMessageInfo -func (m *GetDocumentResponse) GetDocument() *index.Document { +func (m *GetResponse) GetFields() *any.Any { if m != nil { - return m.Document + return m.Fields } return nil } -type IndexDocumentRequest struct { - Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral 
struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type IndexRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } -func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentRequest) ProtoMessage() {} -func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { +func (m *IndexRequest) Reset() { *m = IndexRequest{} } +func (m *IndexRequest) String() string { return proto.CompactTextString(m) } +func (*IndexRequest) ProtoMessage() {} +func (*IndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{4} } -func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) +func (m *IndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) +func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentRequest.Merge(m, src) +func (m *IndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexRequest.Merge(m, src) } -func (m *IndexDocumentRequest) XXX_Size() int { - return xxx_messageInfo_IndexDocumentRequest.Size(m) +func (m *IndexRequest) XXX_Size() int { + return xxx_messageInfo_IndexRequest.Size(m) } -func (m *IndexDocumentRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) +func (m *IndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_IndexRequest proto.InternalMessageInfo + +func (m *IndexRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} -func (m *IndexDocumentRequest) GetDocument() *index.Document { +func (m *IndexRequest) GetFields() *any.Any { if m != nil { - return m.Document + return m.Fields } return nil } -type IndexDocumentResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +type DeleteRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } -func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentResponse) ProtoMessage() {} -func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{5} } -func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return 
xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type BulkIndexRequest struct { + Documents []*index.Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{6} +} + +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentResponse.Merge(m, src) +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) } -func (m *IndexDocumentResponse) XXX_Size() int { - return xxx_messageInfo_IndexDocumentResponse.Size(m) +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) } -func (m *IndexDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentResponse 
proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo -func (m *IndexDocumentResponse) GetCount() int32 { +func (m *BulkIndexRequest) GetDocuments() []*index.Document { + if m != nil { + return m.Documents + } + return nil +} + +type BulkIndexResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{7} +} + +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) +} +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) +} +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) +} +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) +} +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo + +func (m *BulkIndexResponse) GetCount() int32 { if m != nil { return m.Count } return 0 } -type DeleteDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type BulkDeleteRequest struct { + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } -func (m 
*DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentRequest) ProtoMessage() {} -func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{6} +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{8} } -func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) } -func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) } -func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) } -func (m *DeleteDocumentRequest) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentRequest.Size(m) +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) } -func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo -func (m *DeleteDocumentRequest) GetId() string { +func (m *BulkDeleteRequest) 
GetIds() []string { if m != nil { - return m.Id + return m.Ids } - return "" + return nil } -type DeleteDocumentResponse struct { +type BulkDeleteResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } -func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentResponse) ProtoMessage() {} -func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{7} +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{9} } -func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) } -func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) } -func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) } -func (m *DeleteDocumentResponse) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentResponse.Size(m) +func (m *BulkDeleteResponse) XXX_Size() int { + return 
xxx_messageInfo_BulkDeleteResponse.Size(m) } -func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo -func (m *DeleteDocumentResponse) GetCount() int32 { +func (m *BulkDeleteResponse) GetCount() int32 { if m != nil { return m.Count } @@ -412,7 +508,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{8} + return fileDescriptor_0b1b3e8a99d31c9c, []int{10} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -451,7 +547,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{9} + return fileDescriptor_0b1b3e8a99d31c9c, []int{11} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -484,12 +580,14 @@ func init() { proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") - proto.RegisterType((*GetDocumentRequest)(nil), "distribute.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "distribute.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "distribute.IndexDocumentRequest") - 
proto.RegisterType((*IndexDocumentResponse)(nil), "distribute.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "distribute.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "distribute.DeleteDocumentResponse") + proto.RegisterType((*GetRequest)(nil), "distribute.GetRequest") + proto.RegisterType((*GetResponse)(nil), "distribute.GetResponse") + proto.RegisterType((*IndexRequest)(nil), "distribute.IndexRequest") + proto.RegisterType((*DeleteRequest)(nil), "distribute.DeleteRequest") + proto.RegisterType((*BulkIndexRequest)(nil), "distribute.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "distribute.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "distribute.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "distribute.BulkDeleteResponse") proto.RegisterType((*SearchRequest)(nil), "distribute.SearchRequest") proto.RegisterType((*SearchResponse)(nil), "distribute.SearchResponse") } @@ -499,42 +597,55 @@ func init() { } var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 556 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6f, 0xd3, 0x30, - 0x14, 0x5d, 0xba, 0x65, 0xb4, 0xb7, 0xeb, 0x87, 0x4c, 0x37, 0x58, 0x1e, 0x60, 0x98, 0x49, 0x14, - 0x4d, 0x73, 0xa5, 0x22, 0x1e, 0x10, 0x12, 0x28, 0x5b, 0x2a, 0x5a, 0x51, 0x95, 0x29, 0x1d, 0x88, - 0x01, 0xd2, 0x94, 0x34, 0xa6, 0x8d, 0xd6, 0xd6, 0x25, 0x76, 0x24, 0xf6, 0x2b, 0xf8, 0x27, 0x3c, - 0xf2, 0xfb, 0x50, 0xe2, 0x24, 0x24, 0x5d, 0xc8, 0xe0, 0xa5, 0xea, 0xbd, 0xf7, 0x9c, 0xe3, 0xe3, - 0xdc, 0x93, 0xc0, 0xe1, 0xca, 0x63, 0x82, 0xd9, 0xfe, 0xd7, 0x8e, 0xe3, 0x72, 0xe1, 0xb9, 0xb6, - 0x2f, 0x68, 0xea, 0x2f, 0x09, 0xc7, 0x08, 0xfe, 0x74, 0xb4, 0xfd, 0x29, 0x63, 0xd3, 0x39, 0xed, - 0x24, 0x44, 0x6b, 0x79, 0x2d, 0x61, 0x9a, 0x96, 0xf4, 0xdc, 0xa5, 0x43, 0xbf, 0xcb, 0x5f, 0x39, - 0xc3, 0x3f, 0x14, 0xd8, 0x1b, 0x31, 0x87, 0xf6, 0xa9, 0x35, 0x17, 0xb3, 0xd3, 
0x19, 0x9d, 0x5c, - 0x99, 0xf4, 0x9b, 0x4f, 0xb9, 0x40, 0xaf, 0x40, 0x5d, 0x79, 0xcc, 0xa6, 0xf7, 0x95, 0x03, 0xa5, - 0x5d, 0xef, 0xb6, 0x49, 0xea, 0xfc, 0x7c, 0x0a, 0x39, 0x0b, 0xf0, 0xa6, 0xa4, 0xe1, 0xe7, 0xa0, - 0x86, 0x35, 0x6a, 0x40, 0xb5, 0xdf, 0xd3, 0x87, 0xe7, 0xfd, 0xc1, 0xa8, 0x37, 0x1e, 0x37, 0x37, - 0xd0, 0x0e, 0x94, 0x87, 0x83, 0x0f, 0xbd, 0xb0, 0x52, 0x50, 0x0d, 0x2a, 0x66, 0x4f, 0x37, 0xe4, - 0xb0, 0x84, 0x7f, 0x2a, 0x70, 0xef, 0x86, 0x3c, 0x5f, 0xb1, 0x25, 0xa7, 0xe8, 0x35, 0xa8, 0x5c, - 0x58, 0x22, 0xb6, 0xf4, 0xb4, 0xd0, 0x92, 0xe4, 0x90, 0x71, 0x40, 0x30, 0x25, 0x0f, 0x9b, 0xa0, - 0x86, 0x35, 0xaa, 0xc2, 0x1d, 0xe9, 0xe9, 0xa2, 0xb9, 0x11, 0x38, 0x78, 0x3f, 0x8a, 0x4b, 0x05, - 0x55, 0x40, 0xd5, 0x03, 0x7f, 0xcd, 0x12, 0x2a, 0xc3, 0x96, 0xd1, 0xd3, 0x8d, 0xe6, 0x66, 0xd0, - 0x0c, 0x5c, 0x5e, 0x34, 0xb7, 0x02, 0xf8, 0xe8, 0xdd, 0xf9, 0xa5, 0x2c, 0x55, 0x7c, 0x08, 0xe8, - 0x0d, 0x15, 0x06, 0x9b, 0xf8, 0x0b, 0xba, 0x14, 0xf1, 0xd3, 0xab, 0x43, 0xc9, 0x75, 0x42, 0x9f, - 0x15, 0xb3, 0xe4, 0x3a, 0xf8, 0x04, 0xee, 0x66, 0x50, 0xd1, 0x8d, 0x8e, 0xa0, 0xec, 0x44, 0xbd, - 0x10, 0x5c, 0xed, 0x36, 0x88, 0xdc, 0x4f, 0x02, 0x4d, 0x00, 0xf8, 0x14, 0x5a, 0x83, 0x60, 0xb6, - 0x7e, 0xd6, 0x7f, 0x89, 0x1c, 0xc3, 0xee, 0x9a, 0x48, 0x64, 0xa5, 0x05, 0xea, 0x84, 0xf9, 0x91, - 0x84, 0x6a, 0xca, 0x02, 0x3f, 0x81, 0x5d, 0x83, 0xce, 0xa9, 0xa0, 0xb7, 0x5d, 0x90, 0xc0, 0xde, - 0x3a, 0xb0, 0x50, 0x78, 0x08, 0xb5, 0x31, 0xb5, 0xbc, 0xc9, 0x2c, 0x16, 0x7c, 0x09, 0x75, 0x1e, - 0x36, 0x2e, 0x3d, 0xd9, 0x89, 0xee, 0xd2, 0x22, 0x32, 0xda, 0x24, 0x8e, 0x31, 0xd1, 0x97, 0xd7, - 0x66, 0x8d, 0xa7, 0xc9, 0xf8, 0x2d, 0xd4, 0x63, 0xb5, 0xe8, 0xd4, 0x17, 0x50, 0x4b, 0xe4, 0xb8, - 0x3f, 0x2f, 0x56, 0xdb, 0x89, 0xd5, 0x02, 0x64, 0xf7, 0xd7, 0x26, 0x80, 0x91, 0x24, 0x0b, 0x7d, - 0x81, 0xc6, 0x5a, 0xb8, 0x10, 0xbe, 0xfd, 0x65, 0xd0, 0x1e, 0xff, 0x43, 0x3a, 0xf1, 0x06, 0x3a, - 0x83, 0x6a, 0x2a, 0x18, 0xe8, 0x41, 0x9a, 0x75, 0x33, 0x57, 0xda, 0xc3, 0xbf, 0xce, 0x13, 0xc5, - 0x8f, 0x50, 0xcb, 
0x6c, 0x18, 0x1d, 0xa4, 0x39, 0x79, 0x09, 0xd2, 0x1e, 0x15, 0x20, 0x62, 0xdd, - 0xb6, 0x82, 0x3e, 0x43, 0x3d, 0xbb, 0x63, 0x94, 0x21, 0xe6, 0x06, 0x45, 0xc3, 0x45, 0x90, 0x94, - 0xb8, 0x0e, 0xdb, 0x72, 0x85, 0x68, 0x3f, 0xcd, 0xc8, 0x84, 0x44, 0xd3, 0xf2, 0x46, 0xb1, 0xc8, - 0xc9, 0xf1, 0xa7, 0xa3, 0xa9, 0x2b, 0x66, 0xbe, 0x4d, 0x26, 0x6c, 0xd1, 0x59, 0x30, 0xee, 0x5f, - 0x59, 0x1d, 0x7b, 0x6e, 0x71, 0xd1, 0xc9, 0xf9, 0xa0, 0xda, 0xdb, 0x61, 0xf3, 0xd9, 0xef, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xd2, 0x53, 0x25, 0x64, 0x6e, 0x05, 0x00, 0x00, + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x4e, 0xdb, 0x48, + 0x18, 0x5d, 0x27, 0x38, 0x90, 0x2f, 0x24, 0xf1, 0xce, 0x42, 0x00, 0x6f, 0xd8, 0x5d, 0x79, 0x77, + 0x25, 0xf0, 0x2e, 0xb6, 0x36, 0xdb, 0x9b, 0x82, 0xda, 0x2a, 0x34, 0x11, 0x20, 0xa2, 0x50, 0x39, + 0x40, 0x05, 0x52, 0x45, 0x9d, 0x78, 0x20, 0x56, 0x1c, 0x3b, 0x8d, 0xc7, 0x15, 0xa8, 0xea, 0x4d, + 0x5f, 0xa1, 0xb7, 0x7d, 0x93, 0x5e, 0xf6, 0x11, 0xfa, 0x0a, 0x7d, 0x90, 0x6a, 0x66, 0xec, 0xc4, + 0x26, 0x3f, 0xe5, 0x06, 0xf9, 0xfb, 0x3b, 0xe7, 0xcc, 0x7c, 0x67, 0x08, 0xfc, 0x35, 0x18, 0x7a, + 0xc4, 0x6b, 0x07, 0xd7, 0xba, 0x65, 0xfb, 0x64, 0x68, 0xb7, 0x03, 0x82, 0x63, 0x9f, 0x1a, 0x2b, + 0x23, 0x18, 0x67, 0xe4, 0x8d, 0x1b, 0xcf, 0xbb, 0x71, 0xb0, 0x3e, 0x1a, 0x34, 0xdd, 0x3b, 0xde, + 0x26, 0xff, 0x7a, 0xbf, 0x84, 0xfb, 0x03, 0x12, 0x15, 0xe5, 0x51, 0xd6, 0x76, 0x2d, 0x7c, 0xcb, + 0xff, 0x86, 0xb5, 0x72, 0x38, 0x68, 0x0e, 0x6c, 0xdd, 0x74, 0x5d, 0x8f, 0x98, 0xc4, 0xf6, 0x5c, + 0x9f, 0x57, 0x95, 0x4f, 0x02, 0x94, 0x9a, 0x9e, 0x85, 0x0f, 0xb1, 0xe9, 0x90, 0xee, 0xf3, 0x2e, + 0xee, 0xf4, 0x0c, 0xfc, 0x26, 0xc0, 0x3e, 0x41, 0x4f, 0x41, 0x1c, 0x0c, 0xbd, 0x36, 0x5e, 0x17, + 0xfe, 0x10, 0xb6, 0x0a, 0x95, 0x2d, 0x2d, 0x26, 0x7d, 0xfa, 0x88, 0xf6, 0x82, 0xf6, 0x1b, 0x7c, + 0x4c, 0xd9, 0x07, 0x91, 0xc5, 0x28, 0x07, 0x8b, 0x67, 0xcd, 0xe3, 0xe6, 0xc9, 0xcb, 0xa6, 0xf4, + 0x13, 0x2a, 0x42, 0xee, 0xb0, 
0x5e, 0x6d, 0x9c, 0x1e, 0x1e, 0x35, 0xeb, 0xad, 0x96, 0x24, 0xa0, + 0x65, 0x58, 0x6a, 0x1c, 0x9d, 0xd7, 0x59, 0x94, 0x42, 0x79, 0xc8, 0x1a, 0xf5, 0x6a, 0x8d, 0x17, + 0xd3, 0xca, 0x67, 0x01, 0xd6, 0x26, 0xb8, 0xfc, 0x81, 0xe7, 0xfa, 0x18, 0x3d, 0x03, 0xd1, 0x27, + 0x26, 0x89, 0xf4, 0x6d, 0xcf, 0xd5, 0xc7, 0x67, 0xb4, 0x16, 0x1d, 0x30, 0xf8, 0x9c, 0x72, 0x05, + 0x22, 0x8b, 0x93, 0x02, 0x73, 0xb0, 0xc8, 0x05, 0x5e, 0x48, 0x02, 0x95, 0x73, 0xd6, 0x8c, 0xc2, + 0x14, 0xca, 0x82, 0x58, 0xa5, 0x62, 0xa5, 0x34, 0x5a, 0x82, 0x85, 0x5a, 0xbd, 0x5a, 0x93, 0x16, + 0x68, 0x92, 0x4a, 0xbe, 0x90, 0x44, 0xda, 0xde, 0x3c, 0x39, 0xbd, 0xe2, 0x61, 0x46, 0x29, 0x03, + 0x1c, 0x60, 0x12, 0xdd, 0x67, 0x01, 0x52, 0xb6, 0xc5, 0xc4, 0x66, 0x8d, 0x94, 0x6d, 0x29, 0x7b, + 0x90, 0x63, 0xd5, 0xf0, 0x38, 0xff, 0x42, 0xe6, 0xda, 0xc6, 0x8e, 0xe5, 0xb3, 0x96, 0x5c, 0x65, + 0x45, 0xe3, 0x8b, 0xd3, 0xa2, 0xdd, 0x6a, 0x55, 0xf7, 0xce, 0x08, 0x7b, 0x94, 0x06, 0x2c, 0x1f, + 0xd1, 0x25, 0xcf, 0x00, 0x8f, 0xa1, 0xa5, 0x1e, 0x80, 0xf6, 0x3b, 0xe4, 0x6b, 0xd8, 0xc1, 0x04, + 0xcf, 0xd2, 0x5a, 0x05, 0x69, 0x3f, 0x70, 0x7a, 0x09, 0xca, 0x1d, 0xc8, 0x5a, 0x5e, 0x27, 0xe8, + 0x63, 0x97, 0x50, 0xcd, 0xe9, 0xad, 0x5c, 0xa5, 0xa8, 0x71, 0xe7, 0xd5, 0xc2, 0xbc, 0x31, 0xee, + 0x50, 0xb6, 0xe1, 0xe7, 0x18, 0x44, 0x78, 0xe8, 0x15, 0x10, 0x3b, 0x5e, 0xe0, 0x12, 0x46, 0x25, + 0x1a, 0x3c, 0x50, 0xfe, 0xe6, 0xad, 0x49, 0x49, 0x12, 0xa4, 0x6d, 0x8b, 0x13, 0x65, 0x0d, 0xfa, + 0xa9, 0xa8, 0x80, 0xe2, 0x6d, 0x73, 0x21, 0x1b, 0x90, 0x6f, 0x61, 0x73, 0xd8, 0xe9, 0x46, 0x70, + 0x7b, 0x50, 0xf0, 0x59, 0xe2, 0x6a, 0xc8, 0x33, 0x73, 0xaf, 0x3d, 0xef, 0xc7, 0x87, 0x95, 0x63, + 0x28, 0x44, 0x68, 0x21, 0xeb, 0x63, 0xc8, 0x8f, 0xe0, 0xfc, 0xc0, 0x99, 0x8f, 0xb6, 0x1c, 0xa1, + 0xd1, 0xce, 0xca, 0x17, 0x11, 0xa0, 0x36, 0xb2, 0x2e, 0xba, 0x85, 0xe2, 0x3d, 0xf7, 0x22, 0xe5, + 0xc7, 0x4f, 0x4f, 0xfe, 0xf3, 0x01, 0xf6, 0x57, 0xca, 0x1f, 0xbe, 0x7e, 0xfb, 0x98, 0x2a, 0xa1, + 0x15, 0xfd, 0xed, 0x7f, 0xba, 0xeb, 0x59, 0x58, 0xef, 0xb2, 0xae, 
0x0e, 0xa3, 0x39, 0x83, 0xf4, + 0x01, 0x26, 0xa8, 0x14, 0x47, 0x1a, 0xfb, 0x57, 0x5e, 0x9b, 0xc8, 0x87, 0xa8, 0x9b, 0x0c, 0x75, + 0x0d, 0xad, 0x52, 0xd4, 0xd1, 0xc2, 0xf5, 0x77, 0xb6, 0xf5, 0x44, 0x55, 0xdf, 0x23, 0x0f, 0x44, + 0xb6, 0x74, 0xb4, 0x1e, 0x07, 0x88, 0x5b, 0x49, 0x2e, 0x4d, 0x5c, 0x53, 0x9d, 0xfe, 0x77, 0x53, + 0x1e, 0x31, 0x64, 0x4d, 0xce, 0x27, 0x90, 0x77, 0x05, 0xf5, 0x52, 0x96, 0xa7, 0xb3, 0xed, 0x0a, + 0x2a, 0xba, 0x84, 0x0c, 0xf7, 0x04, 0xda, 0x88, 0x33, 0x26, 0xec, 0x34, 0x93, 0x32, 0x3c, 0x8c, + 0x3a, 0xe3, 0x30, 0xaf, 0x20, 0x3b, 0x72, 0x31, 0x2a, 0xc7, 0xe1, 0xef, 0xbf, 0x0f, 0x79, 0x73, + 0x46, 0x35, 0xbc, 0xb5, 0x5f, 0x18, 0x51, 0x5e, 0x5e, 0xa2, 0x44, 0xed, 0xc0, 0xe9, 0x51, 0xe9, + 0xaf, 0x01, 0xc6, 0x96, 0x46, 0x13, 0x08, 0xc9, 0x23, 0xfc, 0x36, 0xab, 0x9c, 0x64, 0x50, 0x13, + 0x0c, 0xe7, 0x90, 0xe1, 0xd6, 0x4d, 0x5e, 0x4e, 0xe2, 0x71, 0xc8, 0xf2, 0xb4, 0x52, 0x88, 0xba, + 0xca, 0x50, 0x8b, 0x0a, 0x50, 0x54, 0x6e, 0xe4, 0x5d, 0x41, 0xdd, 0xdf, 0xb9, 0xfc, 0xe7, 0xc6, + 0x26, 0xdd, 0xa0, 0xad, 0x75, 0xbc, 0xbe, 0xde, 0xf7, 0xfc, 0xa0, 0x67, 0xea, 0x6d, 0xc7, 0xf4, + 0x89, 0x3e, 0xe5, 0x67, 0xb0, 0x9d, 0x61, 0xc9, 0xff, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0xf4, 0x78, 0x1a, 0x24, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -550,9 +661,11 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type DistributeClient interface { NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) - IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) - DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) } @@ -573,81 +686,49 @@ func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCh return out, nil } -func (c *distributeClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { - out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/GetDocument", in, out, opts...) +func (c *distributeClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/Get", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *distributeClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[0], "/distribute.Distribute/IndexDocument", opts...) 
+func (c *distributeClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/distribute.Distribute/Index", in, out, opts...) if err != nil { return nil, err } - x := &distributeIndexDocumentClient{stream} - return x, nil -} - -type Distribute_IndexDocumentClient interface { - Send(*IndexDocumentRequest) error - CloseAndRecv() (*IndexDocumentResponse, error) - grpc.ClientStream -} - -type distributeIndexDocumentClient struct { - grpc.ClientStream -} - -func (x *distributeIndexDocumentClient) Send(m *IndexDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *distributeIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(IndexDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *distributeClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/distribute.Distribute/Delete", in, out, opts...) + if err != nil { return nil, err } - return m, nil + return out, nil } -func (c *distributeClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[1], "/distribute.Distribute/DeleteDocument", opts...) +func (c *distributeClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkIndex", in, out, opts...) 
if err != nil { return nil, err } - x := &distributeDeleteDocumentClient{stream} - return x, nil -} - -type Distribute_DeleteDocumentClient interface { - Send(*DeleteDocumentRequest) error - CloseAndRecv() (*DeleteDocumentResponse, error) - grpc.ClientStream -} - -type distributeDeleteDocumentClient struct { - grpc.ClientStream -} - -func (x *distributeDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *distributeDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(DeleteDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *distributeClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkDelete", in, out, opts...) + if err != nil { return nil, err } - return m, nil + return out, nil } func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { @@ -662,12 +743,40 @@ func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts . // DistributeServer is the server API for Distribute service. 
type DistributeServer interface { NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - IndexDocument(Distribute_IndexDocumentServer) error - DeleteDocument(Distribute_DeleteDocumentServer) error + Get(context.Context, *GetRequest) (*GetResponse, error) + Index(context.Context, *IndexRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) Search(context.Context, *SearchRequest) (*SearchResponse, error) } +// UnimplementedDistributeServer can be embedded to have forward compatible implementations. +type UnimplementedDistributeServer struct { +} + +func (*UnimplementedDistributeServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedDistributeServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedDistributeServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") +} +func (*UnimplementedDistributeServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedDistributeServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedDistributeServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) 
(*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedDistributeServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} + func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { s.RegisterService(&_Distribute_serviceDesc, srv) } @@ -690,74 +799,94 @@ func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } -func _Distribute_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocumentRequest) +func _Distribute_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(DistributeServer).GetDocument(ctx, in) + return srv.(DistributeServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/distribute.Distribute/GetDocument", + FullMethod: "/distribute.Distribute/Get", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).GetDocument(ctx, req.(*GetDocumentRequest)) + return srv.(DistributeServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } -func _Distribute_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DistributeServer).IndexDocument(&distributeIndexDocumentServer{stream}) -} - -type Distribute_IndexDocumentServer interface { - SendAndClose(*IndexDocumentResponse) error - Recv() (*IndexDocumentRequest, error) - grpc.ServerStream -} - -type distributeIndexDocumentServer struct { - grpc.ServerStream -} - -func (x 
*distributeIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *distributeIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { - m := new(IndexDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Distribute_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IndexRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil -} - -func _Distribute_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DistributeServer).DeleteDocument(&distributeDeleteDocumentServer{stream}) -} - -type Distribute_DeleteDocumentServer interface { - SendAndClose(*DeleteDocumentResponse) error - Recv() (*DeleteDocumentRequest, error) - grpc.ServerStream + if interceptor == nil { + return srv.(DistributeServer).Index(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/Index", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).Index(ctx, req.(*IndexRequest)) + } + return interceptor(ctx, in, info, handler) } -type distributeDeleteDocumentServer struct { - grpc.ServerStream +func _Distribute_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *distributeDeleteDocumentServer) 
SendAndClose(m *DeleteDocumentResponse) error { - return x.ServerStream.SendMsg(m) +func _Distribute_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/BulkIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *distributeDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { - m := new(DeleteDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Distribute_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil + if interceptor == nil { + return srv.(DistributeServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) } func _Distribute_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -787,25 +916,30 @@ var _Distribute_serviceDesc = grpc.ServiceDesc{ Handler: _Distribute_NodeHealthCheck_Handler, }, { - MethodName: "GetDocument", - Handler: _Distribute_GetDocument_Handler, + MethodName: "Get", + Handler: _Distribute_Get_Handler, }, { - MethodName: "Search", - Handler: 
_Distribute_Search_Handler, + MethodName: "Index", + Handler: _Distribute_Index_Handler, + }, + { + MethodName: "Delete", + Handler: _Distribute_Delete_Handler, }, - }, - Streams: []grpc.StreamDesc{ { - StreamName: "IndexDocument", - Handler: _Distribute_IndexDocument_Handler, - ClientStreams: true, + MethodName: "BulkIndex", + Handler: _Distribute_BulkIndex_Handler, }, { - StreamName: "DeleteDocument", - Handler: _Distribute_DeleteDocument_Handler, - ClientStreams: true, + MethodName: "BulkDelete", + Handler: _Distribute_BulkDelete_Handler, + }, + { + MethodName: "Search", + Handler: _Distribute_Search_Handler, }, }, + Streams: []grpc.StreamDesc{}, Metadata: "protobuf/distribute/distribute.proto", } diff --git a/protobuf/distribute/distribute.pb.gw.go b/protobuf/distribute/distribute.pb.gw.go new file mode 100644 index 0000000..e540253 --- /dev/null +++ b/protobuf/distribute/distribute.pb.gw.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/distribute/distribute.proto + +/* +Package distribute is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package distribute + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Distribute_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Distribute_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Distribute_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Get_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", 
err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Index_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Index_1(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return 
msg, metadata, err + +} + +func request_Distribute_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Search_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterDistributeHandlerFromEndpoint is same as RegisterDistributeHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterDistributeHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterDistributeHandler(ctx, mux, conn) +} + +// RegisterDistributeHandler registers the http handlers for service Distribute to "mux". 
+// The handlers forward requests to the grpc endpoint over "conn". +func RegisterDistributeHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterDistributeHandlerClient(ctx, mux, NewDistributeClient(conn)) +} + +// RegisterDistributeHandlerClient registers the http handlers for service Distribute +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DistributeClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DistributeClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "DistributeClient" to call the correct interceptors. +func RegisterDistributeHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DistributeClient) error { + + mux.Handle("GET", pattern_Distribute_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Distribute_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Distribute_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Index_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Distribute_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Index_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Distribute_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Distribute_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Distribute_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Distribute_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Distribute_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", 
"bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Distribute_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Distribute_Get_0 = runtime.ForwardResponseMessage + + forward_Distribute_Index_0 = runtime.ForwardResponseMessage + + forward_Distribute_Index_1 = runtime.ForwardResponseMessage + + forward_Distribute_Delete_0 = runtime.ForwardResponseMessage + + forward_Distribute_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Distribute_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Distribute_Search_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto index 53d49d5..beaf5a6 100644 --- a/protobuf/distribute/distribute.proto +++ b/protobuf/distribute/distribute.proto @@ -15,63 +15,114 @@ syntax = "proto3"; import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; import "protobuf/index/index.proto"; +import "google/api/annotations.proto"; package distribute; option go_package = "github.com/mosuka/blast/protobuf/distribute"; service Distribute { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } - rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} - rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} - rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} - rpc Search (SearchRequest) returns (SearchResponse) {} + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Index (IndexRequest) returns (google.protobuf.Empty) { + option 
(google.api.http) = { + put: "/v1/documents" + body: "*" + additional_bindings { + put: "/v1/documents/{id=**}" + body: "*" + } + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/bulk" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/bulk" + body: "*" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } } message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } -message GetDocumentRequest { +message GetRequest { string id = 1; } -message GetDocumentResponse { - index.Document document = 1; +message GetResponse { + google.protobuf.Any fields = 1; } -message IndexDocumentRequest { - index.Document document = 1; +message IndexRequest { + string id = 1; + google.protobuf.Any fields = 2; +} + +message DeleteRequest { + string id = 1; } -message IndexDocumentResponse { +message BulkIndexRequest { + repeated index.Document documents = 1; +} + +message BulkIndexResponse { int32 count = 1; } -message DeleteDocumentRequest { - string id = 1; +message BulkDeleteRequest { + repeated string ids = 1; } -message DeleteDocumentResponse { +message BulkDeleteResponse { int32 count = 1; } diff --git a/protobuf/index/index.go b/protobuf/index/index.go index fd80b99..31a3023 100644 --- a/protobuf/index/index.go +++ b/protobuf/index/index.go 
@@ -1,3 +1,17 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package index import ( @@ -5,7 +19,6 @@ import ( "errors" "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/protobuf" ) diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index f37f30a..b60dbee 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -9,7 +9,10 @@ import ( proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +25,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +61,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + 
NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -154,6 +163,46 @@ func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{9, 0} } +type Proposal_Event int32 + +const ( + Proposal_UNKNOWN Proposal_Event = 0 + Proposal_SET_NODE Proposal_Event = 1 + Proposal_DELETE_NODE Proposal_Event = 2 + Proposal_INDEX Proposal_Event = 3 + Proposal_DELETE Proposal_Event = 4 + Proposal_BULK_INDEX Proposal_Event = 5 + Proposal_BULK_DELETE Proposal_Event = 6 +) + +var Proposal_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_NODE", + 2: "DELETE_NODE", + 3: "INDEX", + 4: "DELETE", + 5: "BULK_INDEX", + 6: "BULK_DELETE", +} + +var Proposal_Event_value = map[string]int32{ + "UNKNOWN": 0, + "SET_NODE": 1, + "DELETE_NODE": 2, + "INDEX": 3, + "DELETE": 4, + "BULK_INDEX": 5, + "BULK_DELETE": 6, +} + +func (x Proposal_Event) String() string { + return proto.EnumName(Proposal_Event_name, int32(x)) +} + +func (Proposal_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{24, 0} +} + type NodeHealthCheckRequest struct { Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -190,7 +239,7 @@ func (m 
*NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse struct { @@ -229,12 +278,13 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` + HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -272,6 +322,13 @@ func (m *Metadata) GetGrpcAddress() string { return "" } +func (m *Metadata) GetGrpcGatewayAddress() string { + if m != nil { + return m.GrpcGatewayAddress + } + return "" +} + func (m *Metadata) GetHttpAddress() string { if m != nil { return m.HttpAddress @@ -592,281 +649,368 @@ func (m *ClusterWatchResponse) GetCluster() *Cluster { return nil } -type Document struct { +type GetRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { +func (m *GetRequest) Reset() { *m = 
GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{10} } -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) } -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) } -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) } -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) } -var xxx_messageInfo_Document proto.InternalMessageInfo +var xxx_messageInfo_GetRequest proto.InternalMessageInfo -func (m *Document) GetId() string { +func (m *GetRequest) GetId() string { if m != nil { return m.Id } return "" } -func (m *Document) GetFields() *any.Any { +type GetResponse struct { + // Document document = 1; + Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) 
Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{11} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetFields() *any.Any { if m != nil { return m.Fields } return nil } -type GetDocumentRequest struct { +type IndexRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } -func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocumentRequest) ProtoMessage() {} -func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} +func (m *IndexRequest) Reset() { *m = IndexRequest{} } +func (m *IndexRequest) String() string { return proto.CompactTextString(m) } +func (*IndexRequest) ProtoMessage() {} +func (*IndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{12} } -func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) +func (m *IndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexRequest.Unmarshal(m, b) } -func (m *GetDocumentRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) +func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentRequest.Merge(m, src) +func (m *IndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexRequest.Merge(m, src) } -func (m *GetDocumentRequest) XXX_Size() int { - return xxx_messageInfo_GetDocumentRequest.Size(m) +func (m *IndexRequest) XXX_Size() int { + return xxx_messageInfo_IndexRequest.Size(m) } -func (m *GetDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) +func (m *IndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_IndexRequest proto.InternalMessageInfo -func (m *GetDocumentRequest) GetId() string { +func (m *IndexRequest) GetId() string { if m != nil { return m.Id } return "" } -type GetDocumentResponse struct { - Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *IndexRequest) GetFields() *any.Any { + if m != nil { + return m.Fields + } + return nil } -func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } -func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*GetDocumentResponse) ProtoMessage() {} -func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} +type DeleteRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } 
-func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{13} } -func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) + +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) } -func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentResponse.Merge(m, src) +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentResponse) XXX_Size() int { - return xxx_messageInfo_GetDocumentResponse.Size(m) +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) } -func (m *GetDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo -func (m *GetDocumentResponse) GetDocument() *Document { +func (m *DeleteRequest) GetId() string { if m != nil { - return m.Document + return m.Id + } + return "" +} + +type Document struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{14} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Document) GetFields() *any.Any { + if m != nil { + return m.Fields } return nil } -type IndexDocumentRequest struct { - Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type BulkIndexRequest struct { + Documents []*Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } -func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentRequest) ProtoMessage() {} -func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) 
String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{15} } -func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentRequest.Merge(m, src) +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) } -func (m *IndexDocumentRequest) XXX_Size() int { - return xxx_messageInfo_IndexDocumentRequest.Size(m) +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) } -func (m *IndexDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo -func (m *IndexDocumentRequest) GetDocument() *Document { +func (m *BulkIndexRequest) GetDocuments() []*Document { if m != nil { - return m.Document + return m.Documents } return nil } -type IndexDocumentResponse struct { +type BulkIndexResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` 
} -func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } -func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentResponse) ProtoMessage() {} -func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{14} +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{16} } -func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) } -func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) } -func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentResponse.Merge(m, src) +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) } -func (m *IndexDocumentResponse) XXX_Size() int { - return xxx_messageInfo_IndexDocumentResponse.Size(m) +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) } -func (m *IndexDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexResponse 
proto.InternalMessageInfo -func (m *IndexDocumentResponse) GetCount() int32 { +func (m *BulkIndexResponse) GetCount() int32 { if m != nil { return m.Count } return 0 } -type DeleteDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type BulkDeleteRequest struct { + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } -func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentRequest) ProtoMessage() {} -func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{17} } -func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) } -func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) } -func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) } -func (m *DeleteDocumentRequest) XXX_Size() int { - return 
xxx_messageInfo_DeleteDocumentRequest.Size(m) +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) } -func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo -func (m *DeleteDocumentRequest) GetId() string { +func (m *BulkDeleteRequest) GetIds() []string { if m != nil { - return m.Id + return m.Ids } - return "" + return nil } -type DeleteDocumentResponse struct { +type BulkDeleteResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } -func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentResponse) ProtoMessage() {} -func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{18} } -func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) } -func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) 
+func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) } -func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) } -func (m *DeleteDocumentResponse) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentResponse.Size(m) +func (m *BulkDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BulkDeleteResponse.Size(m) } -func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo -func (m *DeleteDocumentResponse) GetCount() int32 { +func (m *BulkDeleteResponse) GetCount() int32 { if m != nil { return m.Count } @@ -884,7 +1028,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -923,7 +1067,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -964,7 +1108,7 @@ func (m *IndexConfig) Reset() { *m = 
IndexConfig{} } func (m *IndexConfig) String() string { return proto.CompactTextString(m) } func (*IndexConfig) ProtoMessage() {} func (*IndexConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} + return fileDescriptor_7b2daf652facb3ae, []int{21} } func (m *IndexConfig) XXX_Unmarshal(b []byte) error { @@ -1017,7 +1161,7 @@ func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{20} + return fileDescriptor_7b2daf652facb3ae, []int{22} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -1056,7 +1200,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{21} + return fileDescriptor_7b2daf652facb3ae, []int{23} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -1084,11 +1228,91 @@ func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { return nil } +type Proposal struct { + Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.Proposal_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Document *Document `protobuf:"bytes,3,opt,name=document,proto3" json:"document,omitempty"` + Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` + Documents []*Document `protobuf:"bytes,5,rep,name=documents,proto3" json:"documents,omitempty"` + Ids []string `protobuf:"bytes,6,rep,name=ids,proto3" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{24} +} + +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Proposal.Unmarshal(m, b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return xxx_messageInfo_Proposal.Size(m) +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetEvent() Proposal_Event { + if m != nil { + return m.Event + } + return Proposal_UNKNOWN +} + +func (m *Proposal) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *Proposal) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *Proposal) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Proposal) GetDocuments() []*Document { + if m != nil { + return m.Documents + } + return nil +} + +func (m *Proposal) GetIds() []string { + if m != nil { + return m.Ids + } + return nil +} + func init() { proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) + proto.RegisterEnum("index.Proposal_Event", 
Proposal_Event_name, Proposal_Event_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") proto.RegisterType((*Metadata)(nil), "index.Metadata") @@ -1100,96 +1324,118 @@ func init() { proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") + proto.RegisterType((*GetRequest)(nil), "index.GetRequest") + proto.RegisterType((*GetResponse)(nil), "index.GetResponse") + proto.RegisterType((*IndexRequest)(nil), "index.IndexRequest") + proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") proto.RegisterType((*Document)(nil), "index.Document") - proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") - proto.RegisterType((*IndexDocumentResponse)(nil), "index.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "index.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "index.DeleteDocumentResponse") + proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") + 
proto.RegisterType((*Proposal)(nil), "index.Proposal") } func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x5d, 0x73, 0xda, 0x46, - 0x14, 0x45, 0x80, 0x6c, 0x7c, 0x65, 0x08, 0xdd, 0xd8, 0x4e, 0x42, 0xe2, 0x36, 0xd9, 0xa6, 0x0d, - 0x33, 0x49, 0xa1, 0xe3, 0x8c, 0x27, 0x4d, 0xda, 0x4e, 0x07, 0x83, 0x62, 0x13, 0x13, 0xe1, 0x0a, - 0x3b, 0x9e, 0xf4, 0xc5, 0x23, 0xd0, 0x1a, 0x34, 0x06, 0x49, 0x45, 0x8b, 0xa7, 0x3c, 0xf6, 0xb5, - 0xef, 0xfd, 0x0f, 0xed, 0xcf, 0xe9, 0x7b, 0x7f, 0x4c, 0x67, 0x3f, 0x24, 0x4b, 0x32, 0x90, 0xe9, - 0x8b, 0xc7, 0x7b, 0xef, 0xb9, 0x67, 0xcf, 0xbd, 0xbb, 0x7b, 0x64, 0x43, 0xc5, 0x9f, 0x7a, 0xd4, - 0xeb, 0xcf, 0x2e, 0xeb, 0x8e, 0x6b, 0x93, 0xdf, 0xc4, 0xcf, 0x1a, 0x0f, 0x22, 0x95, 0x2f, 0x2a, - 0x0f, 0x86, 0x9e, 0x37, 0x1c, 0x93, 0x7a, 0x84, 0xb4, 0xdc, 0xb9, 0x40, 0x54, 0x1e, 0xa6, 0x53, - 0x64, 0xe2, 0x53, 0x99, 0xc4, 0x7f, 0x28, 0xb0, 0x63, 0x78, 0x36, 0x39, 0x22, 0xd6, 0x98, 0x8e, - 0x9a, 0x23, 0x32, 0xb8, 0x32, 0xc9, 0xaf, 0x33, 0x12, 0x50, 0xf4, 0x1a, 0x54, 0x7f, 0xea, 0xf5, - 0xc9, 0x7d, 0xe5, 0xb1, 0x52, 0x2d, 0xed, 0x7d, 0x59, 0x13, 0xdb, 0x2e, 0x46, 0xd7, 0x4e, 0x18, - 0xd4, 0x14, 0x15, 0x78, 0x1f, 0x54, 0xbe, 0x46, 0x77, 0x40, 0x3b, 0xd2, 0x1b, 0x9d, 0xd3, 0xa3, - 0xb6, 0xa1, 0xf7, 0x7a, 0xe5, 0x0c, 0xda, 0x84, 0x42, 0xa7, 0xfd, 0x41, 0xe7, 0x2b, 0x05, 0x15, - 0x61, 0xc3, 0xd4, 0x1b, 0x2d, 0x91, 0xcc, 0xe2, 0xbf, 0x15, 0xb8, 0x77, 0x8b, 0x3e, 0xf0, 0x3d, - 0x37, 0x20, 0xe8, 0x0d, 0xa8, 0x01, 0xb5, 0x68, 0xa8, 0xe6, 0xe9, 0x32, 0x35, 0x02, 0x5e, 0xeb, - 0x31, 0xac, 0x29, 0x4a, 0xb0, 0x09, 0x2a, 0x5f, 0x23, 0x0d, 0xd6, 0x85, 0x9c, 0x8f, 0xe5, 0x0c, - 0xdb, 0xfc, 0xcc, 0x08, 0x97, 0x0a, 0xda, 0x00, 0xb5, 0xc1, 0xa4, 0x95, 0xb3, 0xa8, 0x00, 0xf9, - 0x96, 0xde, 0x68, 0x95, 0x73, 0x2c, 0xc8, 0x04, 0x7e, 0x2c, 0xe7, 0x19, 0xdc, 
0xe8, 0x9e, 0x5e, - 0x88, 0xa5, 0x8a, 0x4f, 0xa0, 0xf0, 0x9e, 0x50, 0xcb, 0xb6, 0xa8, 0x85, 0x9e, 0xc0, 0xe6, 0x70, - 0xea, 0x0f, 0x2e, 0x2c, 0xdb, 0x9e, 0x92, 0x20, 0xe0, 0x12, 0x37, 0x4c, 0x8d, 0xc5, 0x1a, 0x22, - 0xc4, 0x20, 0x23, 0x4a, 0xfd, 0x08, 0x92, 0x15, 0x10, 0x16, 0x93, 0x10, 0xfc, 0xaf, 0x02, 0x79, - 0xd6, 0x0e, 0x2a, 0x41, 0xd6, 0xb1, 0x25, 0x49, 0xd6, 0xb1, 0x59, 0x6d, 0xdf, 0x71, 0xed, 0x74, - 0x2d, 0x8b, 0x85, 0xf4, 0xcf, 0xc2, 0xe9, 0xe4, 0xf8, 0x74, 0x3e, 0x8b, 0x4d, 0x27, 0x31, 0x0a, - 0xf4, 0x1c, 0x0a, 0x13, 0x29, 0xfb, 0x7e, 0xfe, 0xb1, 0x52, 0xd5, 0xf6, 0xee, 0x48, 0x6c, 0xd8, - 0x8d, 0x19, 0x01, 0xf0, 0x71, 0x6c, 0x6e, 0x67, 0xc6, 0xb1, 0xd1, 0x3d, 0x37, 0xc4, 0x11, 0xbe, - 0xed, 0x76, 0x3a, 0xdd, 0x73, 0xdd, 0x14, 0x47, 0xd8, 0x6c, 0x18, 0xad, 0x76, 0xab, 0x71, 0xca, - 0x46, 0x07, 0xb0, 0xd6, 0xd1, 0x1b, 0x2d, 0xdd, 0x2c, 0xe7, 0x18, 0xb0, 0x77, 0x74, 0x76, 0xda, - 0x62, 0x65, 0x79, 0xfc, 0xbb, 0x02, 0xeb, 0xcd, 0xf1, 0x2c, 0xa0, 0x64, 0x8a, 0xea, 0xa0, 0xba, - 0x9e, 0x4d, 0xd8, 0xa4, 0x72, 0x55, 0x6d, 0xef, 0x81, 0x94, 0x20, 0xd3, 0x5c, 0x76, 0xa0, 0xbb, - 0x74, 0x3a, 0x37, 0x05, 0xae, 0xa2, 0x03, 0xdc, 0x04, 0x51, 0x19, 0x72, 0x57, 0x64, 0x2e, 0x27, - 0xc4, 0x7e, 0x45, 0x4f, 0x40, 0xbd, 0xb6, 0xc6, 0x33, 0xc2, 0x67, 0xa3, 0xed, 0x69, 0xb1, 0xfe, - 0x4d, 0x91, 0x79, 0x93, 0xfd, 0x4e, 0xc1, 0x2f, 0xa1, 0xcc, 0x42, 0x6d, 0xf7, 0xd2, 0x8b, 0x2e, - 0xd6, 0x17, 0x90, 0x67, 0x7b, 0x70, 0xb6, 0x54, 0x25, 0x4f, 0xe0, 0x7d, 0x40, 0x52, 0xd8, 0x3b, - 0xcf, 0x71, 0xc3, 0xd7, 0xf1, 0xc9, 0xb2, 0xaf, 0xe0, 0xae, 0x2c, 0xeb, 0x10, 0xeb, 0x9a, 0x84, - 0x75, 0xa9, 0xc3, 0xc5, 0x3f, 0x45, 0xb0, 0x84, 0xaa, 0x2a, 0xac, 0x0f, 0x44, 0x58, 0xee, 0x50, - 0x4a, 0xce, 0xc8, 0x0c, 0xd3, 0xf8, 0x1f, 0x05, 0xb6, 0x64, 0xf0, 0xdc, 0xa2, 0x83, 0x51, 0x44, - 0xf1, 0x0a, 0x54, 0x72, 0x4d, 0x5c, 0x2a, 0x5f, 0xcc, 0x93, 0x24, 0x41, 0x02, 0x5b, 0xd3, 0x19, - 0xd0, 0x14, 0xf8, 0xa8, 0xb5, 0xec, 0x92, 0xd6, 0xe2, 0xe2, 0x72, 0xab, 0xc5, 0xed, 0x83, 0xca, - 0xa9, 0x93, 0x37, 
0xa8, 0x00, 0xf9, 0x77, 0xdd, 0xb6, 0x21, 0x1e, 0x5d, 0x47, 0x6f, 0x7c, 0x90, - 0x37, 0xe7, 0xec, 0x84, 0xdf, 0xa2, 0x1c, 0x3e, 0x82, 0x42, 0xcb, 0x1b, 0xcc, 0x26, 0xac, 0x32, - 0xfd, 0x1a, 0x5e, 0xc0, 0xda, 0xa5, 0x43, 0xc6, 0x76, 0x20, 0xf5, 0x6d, 0xd5, 0x84, 0xbf, 0xd5, - 0x42, 0x7f, 0xab, 0x35, 0xdc, 0xb9, 0x29, 0x31, 0xf8, 0x29, 0xa0, 0x43, 0x42, 0x43, 0xb2, 0x65, - 0x87, 0x70, 0x00, 0x77, 0x13, 0x28, 0x39, 0xc1, 0xe7, 0x50, 0xb0, 0x65, 0x4c, 0x9e, 0x42, 0xf8, - 0x58, 0x22, 0x68, 0x04, 0xc0, 0x4d, 0xd8, 0x6a, 0xb3, 0x5c, 0x7a, 0xaf, 0xff, 0x45, 0xf2, 0x0d, - 0x6c, 0xa7, 0x48, 0xa4, 0x94, 0x2d, 0x50, 0x07, 0xde, 0x4c, 0x52, 0xa8, 0xa6, 0x58, 0xe0, 0x67, - 0xb0, 0xdd, 0x22, 0x63, 0x42, 0xc9, 0xa7, 0x1a, 0xac, 0xc1, 0x4e, 0x1a, 0xb8, 0x92, 0xb8, 0x03, - 0xc5, 0x1e, 0xb1, 0xa6, 0xec, 0x86, 0x08, 0xc2, 0xef, 0xa1, 0x14, 0xf0, 0xc0, 0xc5, 0x54, 0x44, - 0x64, 0x2f, 0x8b, 0xa7, 0x5f, 0x0c, 0xe2, 0xc5, 0xf8, 0x18, 0x4a, 0x21, 0x9b, 0xdc, 0xf5, 0x35, - 0x14, 0x23, 0xba, 0x60, 0x36, 0x5e, 0xcd, 0xb6, 0x19, 0xb2, 0x31, 0x24, 0xfe, 0x53, 0x01, 0x8d, - 0xcf, 0xa8, 0xe9, 0xb9, 0x97, 0xce, 0x90, 0x51, 0xf1, 0x71, 0x5e, 0x4c, 0x2c, 0xdf, 0x77, 0xdc, - 0xe1, 0x6a, 0x2a, 0x0e, 0x7d, 0x2f, 0x90, 0x68, 0x17, 0x40, 0x94, 0xd2, 0xb9, 0x4f, 0xa4, 0xad, - 0x6e, 0xf0, 0xc8, 0xe9, 0xdc, 0x27, 0xe8, 0x05, 0x20, 0x91, 0x0e, 0xa8, 0x37, 0xb5, 0x86, 0x44, - 0xc0, 0x72, 0x1c, 0x56, 0xe6, 0x99, 0x9e, 0x48, 0x30, 0x34, 0xee, 0xc2, 0xce, 0x21, 0xa1, 0x31, - 0x65, 0x51, 0xb3, 0xfb, 0x20, 0xb6, 0xbd, 0x18, 0xf0, 0xb8, 0x14, 0x88, 0xe4, 0x2d, 0x88, 0x57, - 0x68, 0xce, 0xcd, 0x02, 0x1b, 0xb0, 0x1d, 0x12, 0x32, 0x17, 0x0e, 0x62, 0x7c, 0x5a, 0xa8, 0xcb, - 0xa2, 0xc1, 0xca, 0x7e, 0xc1, 0x89, 0xca, 0xf7, 0xfe, 0x5a, 0x07, 0x95, 0xb3, 0x21, 0x13, 0xee, - 0xa4, 0xbe, 0x9b, 0x68, 0x77, 0xe5, 0xd7, 0xbd, 0xf2, 0xf9, 0xea, 0xcf, 0x2d, 0xce, 0xa0, 0x1f, - 0xa1, 0x10, 0x5a, 0x2b, 0xda, 0xb9, 0xa5, 0x45, 0x67, 0x7f, 0x72, 0x54, 0xee, 0xc5, 0x58, 0xe2, - 0x6e, 0x87, 0x33, 0xe8, 0x00, 0xb4, 0x98, 0xc9, 0xa2, 
0xd4, 0x17, 0x21, 0x66, 0xbc, 0x95, 0x25, - 0xe4, 0x38, 0x83, 0x5a, 0xb0, 0x19, 0x77, 0x5c, 0x54, 0x49, 0x92, 0xc4, 0x6d, 0x78, 0x05, 0x4b, - 0x33, 0x52, 0xb2, 0xb2, 0x97, 0x14, 0x79, 0xaa, 0x9d, 0xc3, 0x48, 0x0a, 0xf7, 0xd9, 0xa5, 0x2c, - 0x0f, 0x57, 0x98, 0x32, 0xce, 0x7c, 0xab, 0xa0, 0xb7, 0xa0, 0xc5, 0x9c, 0x29, 0x9a, 0xcb, 0x6d, - 0x4f, 0x8b, 0x04, 0x2d, 0x30, 0x32, 0x9c, 0x41, 0x06, 0x14, 0x13, 0xc6, 0x82, 0x1e, 0xc6, 0xaf, - 0x5f, 0x9a, 0xeb, 0xd1, 0xe2, 0x64, 0xc8, 0x56, 0x55, 0xd0, 0xcf, 0x50, 0x4a, 0x1a, 0x0a, 0x0a, - 0x6b, 0x16, 0x1a, 0x52, 0x65, 0x77, 0x49, 0x36, 0x46, 0xf9, 0x0a, 0xd6, 0x84, 0x4b, 0xa0, 0x2d, - 0x09, 0x4e, 0x58, 0x50, 0x65, 0x3b, 0x15, 0x8d, 0x7a, 0x6b, 0x43, 0x29, 0xf9, 0xf2, 0x96, 0x8e, - 0x7b, 0xf7, 0x66, 0x46, 0x0b, 0x1e, 0x2a, 0x3f, 0xb7, 0x62, 0xe2, 0xcd, 0x2d, 0x65, 0x7a, 0x94, - 0x62, 0x4a, 0xbc, 0x50, 0x9c, 0x41, 0x3f, 0x40, 0xa1, 0xe7, 0x5a, 0x7e, 0x30, 0xf2, 0xe8, 0x52, - 0x8e, 0xa5, 0x77, 0xf0, 0xa0, 0xfa, 0xcb, 0xd7, 0x43, 0x87, 0x8e, 0x66, 0xfd, 0xda, 0xc0, 0x9b, - 0xd4, 0x27, 0x5e, 0x30, 0xbb, 0xb2, 0xea, 0xfd, 0xb1, 0x15, 0xd0, 0x7a, 0xf2, 0x5f, 0x81, 0xfe, - 0x1a, 0x5f, 0xbf, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x14, 0x4f, 0xc0, 0x27, 0x23, 0x0c, 0x00, - 0x00, + // 1454 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xdf, 0x72, 0xda, 0xc6, + 0x17, 0xb6, 0x00, 0x61, 0x7c, 0x04, 0x58, 0xd9, 0x60, 0x3b, 0x51, 0xec, 0x5f, 0xe2, 0xfd, 0x35, + 0xad, 0x4b, 0x5a, 0x48, 0x9d, 0x66, 0xda, 0x38, 0xed, 0x74, 0xb0, 0x51, 0x6d, 0x62, 0x02, 0x19, + 0x81, 0x93, 0x26, 0x33, 0x1d, 0x46, 0xc0, 0x1a, 0x54, 0x63, 0x89, 0x22, 0xe1, 0x96, 0xe9, 0xf4, + 0xa2, 0x79, 0x85, 0x4e, 0xa7, 0x6f, 0xd2, 0x17, 0xe8, 0x13, 0x74, 0x7a, 0x9b, 0xcb, 0x3e, 0x48, + 0x67, 0xff, 0x48, 0x48, 0xd8, 0x90, 0x76, 0x7a, 0xe3, 0x61, 0xcf, 0xf9, 0xce, 0xb7, 0xdf, 0x39, + 0x3a, 0xbb, 0x67, 0x0d, 0xda, 0x70, 0xe4, 0x78, 0x4e, 0x7b, 0x7c, 0x5a, 0xb4, 0xec, 0x2e, 0xf9, + 0x9e, 0xff, 0x2d, 0x30, 0x23, 0x92, 
0xd9, 0x42, 0xbb, 0xd9, 0x73, 0x9c, 0xde, 0x80, 0x14, 0x03, + 0xa4, 0x69, 0x4f, 0x38, 0x42, 0xbb, 0x35, 0xeb, 0x22, 0xe7, 0x43, 0xcf, 0x77, 0x6e, 0x0a, 0xa7, + 0x39, 0xb4, 0x8a, 0xa6, 0x6d, 0x3b, 0x9e, 0xe9, 0x59, 0x8e, 0xed, 0x72, 0x2f, 0xfe, 0x55, 0x82, + 0xf5, 0x9a, 0xd3, 0x25, 0x47, 0xc4, 0x1c, 0x78, 0xfd, 0x83, 0x3e, 0xe9, 0x9c, 0x19, 0xe4, 0xdb, + 0x31, 0x71, 0x3d, 0xf4, 0x08, 0xe4, 0xe1, 0xc8, 0x69, 0x93, 0x1b, 0xd2, 0x1d, 0x69, 0x27, 0xbb, + 0xfb, 0xff, 0x02, 0x17, 0x75, 0x35, 0xba, 0xf0, 0x8c, 0x42, 0x0d, 0x1e, 0x81, 0xf7, 0x41, 0x66, + 0x6b, 0xa4, 0xc0, 0xf2, 0x49, 0xed, 0xb8, 0x56, 0x7f, 0x51, 0x53, 0x97, 0xd0, 0x2a, 0x28, 0x47, + 0x7a, 0xa9, 0xda, 0x3c, 0xaa, 0xd4, 0xf4, 0x46, 0x43, 0x95, 0x50, 0x1a, 0x52, 0xd5, 0xca, 0x73, + 0x9d, 0xad, 0x62, 0x28, 0x03, 0x2b, 0x86, 0x5e, 0x2a, 0x73, 0x67, 0x1c, 0xff, 0x26, 0xc1, 0xc6, + 0xa5, 0xbd, 0xdc, 0xa1, 0x63, 0xbb, 0x04, 0xed, 0x81, 0xec, 0x7a, 0xa6, 0xe7, 0x4b, 0x7b, 0x67, + 0x9e, 0x34, 0x0e, 0x2f, 0x34, 0x28, 0xd6, 0xe0, 0x21, 0xb8, 0x05, 0x32, 0x5b, 0x47, 0xb5, 0x29, + 0xb0, 0xcc, 0xb5, 0xbd, 0x54, 0x25, 0xaa, 0xe4, 0xa4, 0xe6, 0x2f, 0x63, 0x68, 0x05, 0xe4, 0x12, + 0xd5, 0xa9, 0xc6, 0x51, 0x0a, 0x12, 0x65, 0xbd, 0x54, 0x56, 0x13, 0xd4, 0x48, 0xd5, 0xbe, 0x54, + 0x65, 0x0a, 0xaf, 0xd5, 0x9b, 0x2d, 0xbe, 0x4c, 0xe2, 0xd7, 0x12, 0xa4, 0x9e, 0x12, 0xcf, 0xec, + 0x9a, 0x9e, 0x89, 0xb6, 0x21, 0xdd, 0x1b, 0x0d, 0x3b, 0x2d, 0xb3, 0xdb, 0x1d, 0x11, 0xd7, 0x65, + 0x82, 0x57, 0x0c, 0x85, 0xda, 0x4a, 0xdc, 0x84, 0xee, 0x43, 0x8e, 0x41, 0x7a, 0xa6, 0x47, 0xbe, + 0x33, 0x27, 0x01, 0x34, 0xc6, 0xa0, 0x88, 0xfa, 0x0e, 0xb9, 0xcb, 0x8f, 0xd8, 0x86, 0x74, 0xdf, + 0xf3, 0x86, 0x01, 0x32, 0xce, 0x49, 0xa9, 0x4d, 0x40, 0xf0, 0x1b, 0x09, 0x12, 0xb4, 0x1c, 0x28, + 0x0b, 0x31, 0xab, 0x2b, 0xb6, 0x8d, 0x59, 0x5d, 0x1a, 0xdb, 0xb6, 0xec, 0xee, 0xcc, 0x2e, 0x0a, + 0xb5, 0xf9, 0xf4, 0xef, 0xf9, 0xd5, 0x8d, 0xb3, 0xea, 0x5e, 0x0b, 0x55, 0x37, 0x52, 0x4a, 0x74, + 0x0f, 0x52, 0xe7, 0x22, 0xd1, 0x1b, 0x89, 0x3b, 0xd2, 0x8e, 0xb2, 0xbb, 
0x2a, 0xb0, 0x7e, 0xfe, + 0x46, 0x00, 0xc0, 0xc7, 0x57, 0xd6, 0x3d, 0x0d, 0xa9, 0x2f, 0xeb, 0xd5, 0x6a, 0xfd, 0x85, 0x6e, + 0xf0, 0xc2, 0x1f, 0x94, 0x6a, 0xe5, 0x4a, 0xb9, 0xd4, 0xd4, 0xd5, 0x18, 0x02, 0x48, 0x56, 0xf5, + 0x52, 0x59, 0x37, 0xd4, 0x38, 0x05, 0x36, 0x8e, 0x4e, 0x9a, 0x65, 0x1a, 0x96, 0xc0, 0x3f, 0x49, + 0xb0, 0x7c, 0x30, 0x18, 0xbb, 0x1e, 0x19, 0xa1, 0x22, 0xc8, 0xb6, 0xd3, 0x25, 0xb4, 0xb6, 0xf1, + 0x1d, 0x65, 0xf7, 0xa6, 0x90, 0x20, 0xdc, 0x4c, 0xb6, 0xab, 0xdb, 0xde, 0x68, 0x62, 0x70, 0x9c, + 0xa6, 0x03, 0x4c, 0x8d, 0x48, 0x85, 0xf8, 0x19, 0x99, 0x88, 0x0a, 0xd1, 0x9f, 0x68, 0x1b, 0xe4, + 0x0b, 0x73, 0x30, 0x26, 0xac, 0x36, 0xca, 0xae, 0x12, 0xca, 0xdf, 0xe0, 0x9e, 0xbd, 0xd8, 0xa7, + 0x12, 0x7e, 0x00, 0x2a, 0x35, 0x55, 0xec, 0x53, 0x27, 0x68, 0xcc, 0xdb, 0x90, 0xa0, 0x7b, 0x30, + 0xb6, 0x99, 0x48, 0xe6, 0xc0, 0x0f, 0x01, 0x09, 0x61, 0x4f, 0x1c, 0xcb, 0xf6, 0x8f, 0xda, 0x5b, + 0xc3, 0xee, 0xc2, 0x75, 0x11, 0x56, 0x25, 0xe6, 0x05, 0xf1, 0xe3, 0x66, 0x3e, 0x2e, 0xfe, 0x22, + 0x80, 0x45, 0x54, 0xed, 0xc0, 0x72, 0x87, 0x9b, 0xc5, 0x0e, 0xd9, 0x68, 0x8d, 0x0c, 0xdf, 0x8d, + 0xff, 0x90, 0x20, 0x27, 0x8c, 0x2f, 0x4c, 0xaf, 0xd3, 0x0f, 0x28, 0x3e, 0x01, 0x99, 0x5c, 0x10, + 0xdb, 0x13, 0x27, 0x6e, 0x3b, 0x4a, 0x10, 0xc1, 0x16, 0x74, 0x0a, 0x34, 0x38, 0x3e, 0x48, 0x2d, + 0x36, 0x27, 0xb5, 0xb0, 0xb8, 0xf8, 0x62, 0x71, 0x0f, 0x41, 0x66, 0xd4, 0xd1, 0x0e, 0x4a, 0x41, + 0xe2, 0x49, 0xbd, 0x52, 0x53, 0x25, 0x7a, 0x24, 0xab, 0x7a, 0xe9, 0xb9, 0xe8, 0x9c, 0x93, 0x67, + 0xac, 0x8b, 0xe2, 0x78, 0x13, 0xe0, 0x90, 0x78, 0xf3, 0x4a, 0xf6, 0x18, 0x14, 0xe6, 0x15, 0x79, + 0x7e, 0x00, 0xc9, 0x53, 0x8b, 0x0c, 0xba, 0xae, 0xa8, 0x54, 0xae, 0xc0, 0xaf, 0xcf, 0x82, 0x7f, + 0xb7, 0x16, 0x4a, 0xf6, 0xc4, 0x10, 0x18, 0x5c, 0x85, 0x74, 0x85, 0x6a, 0x9d, 0x43, 0x1e, 0x62, + 0x8b, 0xfd, 0x03, 0xb6, 0xdb, 0x90, 0x29, 0x93, 0x01, 0xf1, 0xe6, 0x7e, 0xde, 0x23, 0x48, 0x95, + 0x9d, 0xce, 0xf8, 0x9c, 0xd6, 0xe0, 0xbf, 0x6d, 0x55, 0x02, 0x75, 0x7f, 0x3c, 0x38, 0x8b, 0x88, + 0xff, 0x10, 
0x56, 0xba, 0x82, 0xdd, 0x3f, 0x4b, 0xfe, 0x71, 0xf6, 0x77, 0x35, 0xa6, 0x08, 0xfc, + 0x3e, 0x5c, 0x0b, 0x51, 0x88, 0xf2, 0xe5, 0x40, 0xee, 0x38, 0x63, 0xd1, 0x26, 0xb2, 0xc1, 0x17, + 0xf8, 0x2e, 0x87, 0x46, 0x93, 0x53, 0x21, 0x6e, 0x75, 0xf9, 0x46, 0x2b, 0x06, 0xfd, 0x89, 0xf3, + 0x80, 0xc2, 0xb0, 0x85, 0x94, 0x55, 0xc8, 0x34, 0x88, 0x39, 0xa2, 0x5d, 0xc7, 0xe9, 0x1e, 0x43, + 0xd6, 0x65, 0x86, 0xd6, 0x88, 0x5b, 0x16, 0x7e, 0xc0, 0x8c, 0x1b, 0x0e, 0xc6, 0xc7, 0x90, 0xf5, + 0xd9, 0xc4, 0xae, 0x8f, 0x20, 0x13, 0xd0, 0xb9, 0xe3, 0xc1, 0x62, 0xb6, 0xb4, 0xcf, 0x46, 0x91, + 0xf8, 0x17, 0x09, 0x14, 0x56, 0x95, 0x03, 0xc7, 0x3e, 0xb5, 0x7a, 0x94, 0x8a, 0x55, 0xb1, 0x75, + 0x6e, 0x0e, 0x87, 0x96, 0xdd, 0x5b, 0x4c, 0xc5, 0xa0, 0x4f, 0x39, 0x12, 0x6d, 0x01, 0xf0, 0x50, + 0x6f, 0x32, 0x24, 0xe2, 0xaa, 0x5e, 0x61, 0x96, 0xe6, 0x64, 0x48, 0x9b, 0x15, 0x71, 0xb7, 0xeb, + 0x39, 0x23, 0xb3, 0x47, 0x38, 0x8c, 0x4f, 0x03, 0x95, 0x79, 0x1a, 0xdc, 0x41, 0xd1, 0xb8, 0x0e, + 0xeb, 0x87, 0xc4, 0x0b, 0x29, 0x0b, 0x92, 0x7d, 0x08, 0x7c, 0xdb, 0x56, 0x87, 0xd9, 0x85, 0x40, + 0x24, 0x3e, 0x7e, 0x38, 0x42, 0xb1, 0xa6, 0x0b, 0x5c, 0x83, 0x35, 0x9f, 0x90, 0xde, 0xec, 0x6e, + 0x88, 0x4f, 0xf1, 0x75, 0x99, 0xde, 0xe2, 0x93, 0x04, 0x56, 0x10, 0x8e, 0x7f, 0x8f, 0x41, 0xea, + 0xd9, 0xc8, 0x19, 0x3a, 0xae, 0x39, 0x40, 0xf7, 0xa2, 0x17, 0xce, 0x9a, 0x10, 0xe3, 0xfb, 0xff, + 0xe5, 0x25, 0x73, 0x0f, 0x52, 0x7e, 0xe7, 0x8a, 0x5b, 0xe6, 0x52, 0x6b, 0x07, 0x00, 0x71, 0xb4, + 0x12, 0xc1, 0xd1, 0x8a, 0x1c, 0x0c, 0xf9, 0x6d, 0x07, 0xc3, 0x6f, 0xec, 0xe4, 0xb4, 0xb1, 0xcf, + 0xae, 0xbc, 0xb8, 0xe8, 0x44, 0xd3, 0x9b, 0xad, 0x5a, 0xbd, 0xac, 0xab, 0x12, 0x7d, 0x1c, 0x95, + 0xf5, 0xaa, 0xde, 0xd4, 0xb9, 0x81, 0xbd, 0x3a, 0x2a, 0xb5, 0xb2, 0xfe, 0x95, 0x1a, 0xa7, 0xb7, + 0x19, 0xf7, 0xa9, 0x09, 0x94, 0x05, 0xd8, 0x3f, 0xa9, 0x1e, 0xb7, 0xb8, 0x4f, 0xa6, 0x71, 0x6c, + 0x2d, 0x00, 0xc9, 0xdd, 0x37, 0x34, 0x90, 0x8a, 0x43, 0x36, 0xac, 0xce, 0xbc, 0x88, 0xd0, 0xd6, + 0xc2, 0x47, 0x9c, 0xf6, 0xbf, 0xc5, 0x0f, 0x29, 
0xbc, 0xf9, 0xfa, 0xcf, 0xbf, 0x7e, 0x8e, 0xad, + 0xa3, 0x5c, 0xf1, 0xe2, 0xa3, 0x22, 0x2d, 0x6c, 0xb1, 0xcf, 0x50, 0x1d, 0x46, 0xde, 0x84, 0x94, + 0x3f, 0x10, 0xd1, 0xfa, 0xa5, 0xaf, 0xad, 0xd3, 0x37, 0xa9, 0xb6, 0x11, 0xda, 0x21, 0x3c, 0xa3, + 0xf0, 0x06, 0xa3, 0xbe, 0x86, 0x56, 0x03, 0x6a, 0xda, 0x3c, 0x63, 0x17, 0xed, 0x83, 0x12, 0x9a, + 0x98, 0x68, 0x66, 0xbc, 0x87, 0xa6, 0xa8, 0x36, 0x67, 0x4f, 0xbc, 0x84, 0xca, 0x90, 0x0e, 0x8f, + 0x4f, 0xa4, 0x45, 0x49, 0xc2, 0x33, 0x75, 0x01, 0xcb, 0xd7, 0x81, 0x92, 0x85, 0x29, 0xce, 0x90, + 0x47, 0xb2, 0xd4, 0x58, 0x96, 0x39, 0x84, 0x68, 0x96, 0x62, 0xae, 0xf9, 0x89, 0x1e, 0x06, 0x22, + 0xd9, 0x38, 0x9d, 0xcb, 0x7f, 0x6b, 0xc1, 0xec, 0xc5, 0x4b, 0xf7, 0x25, 0x74, 0x0c, 0xf1, 0x43, + 0xe2, 0x21, 0xff, 0xdd, 0x36, 0x1d, 0x7e, 0x1a, 0x0a, 0x9b, 0x44, 0xc4, 0x16, 0x93, 0xb4, 0x81, + 0xd6, 0xa8, 0xa4, 0xa0, 0x8b, 0x8b, 0x3f, 0x58, 0xdd, 0xcf, 0xf3, 0xf9, 0x1f, 0xd1, 0x37, 0x7e, + 0x37, 0x5d, 0x0f, 0x5f, 0x07, 0x6f, 0x2b, 0xd6, 0xc7, 0x8c, 0xb4, 0xa0, 0x65, 0x22, 0xa4, 0x7b, + 0x52, 0xfe, 0x95, 0xa6, 0x5d, 0xbd, 0xd1, 0x9e, 0x94, 0x47, 0x27, 0x90, 0xe4, 0x97, 0x3f, 0xca, + 0xf9, 0xe7, 0x2b, 0x3c, 0x32, 0xe6, 0xee, 0x26, 0x52, 0xc8, 0xcf, 0x49, 0xa1, 0x01, 0x2b, 0xc1, + 0xa4, 0x42, 0x7e, 0x03, 0xce, 0x8e, 0x3f, 0xed, 0xc6, 0x65, 0x87, 0xa8, 0xd0, 0x75, 0x46, 0x9f, + 0xd1, 0x52, 0x94, 0xbe, 0x3d, 0x1e, 0x9c, 0x51, 0xad, 0xcf, 0x01, 0xa6, 0xc3, 0x0a, 0x85, 0x83, + 0xa3, 0x9a, 0x6f, 0x5e, 0xe1, 0x89, 0xf2, 0xe6, 0x23, 0xbc, 0x55, 0x48, 0xf2, 0x51, 0x14, 0xd4, + 0x20, 0x32, 0xe7, 0xb4, 0xb5, 0x19, 0xab, 0xe0, 0x5a, 0x63, 0x5c, 0xab, 0x18, 0x28, 0x17, 0x1f, + 0x47, 0x94, 0xad, 0x02, 0xd9, 0xe8, 0x9d, 0x3f, 0xb7, 0xab, 0xb6, 0xa6, 0xad, 0x71, 0xc5, 0x88, + 0xc0, 0x4b, 0xe8, 0x10, 0x32, 0x91, 0xdb, 0x7e, 0x2e, 0xd3, 0xe6, 0x0c, 0x53, 0x64, 0x36, 0xe0, + 0x25, 0xf4, 0x19, 0xa4, 0x1a, 0xb6, 0x39, 0x74, 0xfb, 0x8e, 0x37, 0x97, 0x63, 0xee, 0x21, 0xdc, + 0xdf, 0x79, 0xf5, 0x6e, 0xcf, 0xf2, 0xfa, 0xe3, 0x76, 0xa1, 0xe3, 0x9c, 0x17, 0xcf, 
0x1d, 0x77, + 0x7c, 0x66, 0x16, 0xdb, 0x03, 0xd3, 0xf5, 0x8a, 0xd1, 0xff, 0xa1, 0xdb, 0x49, 0xb6, 0x7e, 0xf0, + 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x30, 0x9b, 0x20, 0x5c, 0x0f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1210,9 +1456,11 @@ type IndexClient interface { ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) - GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) - IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) - DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) @@ -1304,81 +1552,49 @@ func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { return m, nil } -func (c *indexClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) 
(*GetDocumentResponse, error) { - out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetDocument", in, out, opts...) +func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[1], "/index.Index/IndexDocument", opts...) +func (c *indexClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Index", in, out, opts...) if err != nil { return nil, err } - x := &indexIndexDocumentClient{stream} - return x, nil -} - -type Index_IndexDocumentClient interface { - Send(*IndexDocumentRequest) error - CloseAndRecv() (*IndexDocumentResponse, error) - grpc.ClientStream -} - -type indexIndexDocumentClient struct { - grpc.ClientStream -} - -func (x *indexIndexDocumentClient) Send(m *IndexDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *indexIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(IndexDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) 
+ if err != nil { return nil, err } - return m, nil + return out, nil } -func (c *indexClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[2], "/index.Index/DeleteDocument", opts...) +func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) if err != nil { return nil, err } - x := &indexDeleteDocumentClient{stream} - return x, nil -} - -type Index_DeleteDocumentClient interface { - Send(*DeleteDocumentRequest) error - CloseAndRecv() (*DeleteDocumentResponse, error) - grpc.ClientStream -} - -type indexDeleteDocumentClient struct { - grpc.ClientStream -} - -func (x *indexDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *indexDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(DeleteDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) 
+ if err != nil { return nil, err } - return m, nil + return out, nil } func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { @@ -1425,15 +1641,67 @@ type IndexServer interface { ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error - GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - IndexDocument(Index_IndexDocumentServer) error - DeleteDocument(Index_DeleteDocumentServer) error + Get(context.Context, *GetRequest) (*GetResponse, error) + Index(context.Context, *IndexRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) Search(context.Context, *SearchRequest) (*SearchResponse, error) GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) GetIndexStats(context.Context, *empty.Empty) (*GetIndexStatsResponse, error) Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } +// UnimplementedIndexServer can be embedded to have forward compatible implementations. 
+type UnimplementedIndexServer struct { +} + +func (*UnimplementedIndexServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedIndexServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") +} +func (*UnimplementedIndexServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") +} +func (*UnimplementedIndexServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") +} +func (*UnimplementedIndexServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (*UnimplementedIndexServer) ClusterWatch(req *empty.Empty, srv Index_ClusterWatchServer) error { + return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") +} +func (*UnimplementedIndexServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedIndexServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") +} +func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedIndexServer) GetIndexConfig(ctx context.Context, req *empty.Empty) (*GetIndexConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIndexConfig not implemented") +} +func (*UnimplementedIndexServer) GetIndexStats(ctx context.Context, req *empty.Empty) (*GetIndexStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIndexStats not implemented") +} +func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} + func RegisterIndexServer(s *grpc.Server, srv IndexServer) { s.RegisterService(&_Index_serviceDesc, srv) } @@ -1549,74 +1817,94 @@ func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { return x.ServerStream.SendMsg(m) } -func _Index_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocumentRequest) +func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetDocument(ctx, in) + return srv.(IndexServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetDocument", + FullMethod: "/index.Index/Get", 
} handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetDocument(ctx, req.(*GetDocumentRequest)) + return srv.(IndexServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IndexServer).IndexDocument(&indexIndexDocumentServer{stream}) -} - -type Index_IndexDocumentServer interface { - SendAndClose(*IndexDocumentResponse) error - Recv() (*IndexDocumentRequest, error) - grpc.ServerStream -} - -type indexIndexDocumentServer struct { - grpc.ServerStream -} - -func (x *indexIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *indexIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { - m := new(IndexDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Index_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IndexRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil -} - -func _Index_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IndexServer).DeleteDocument(&indexDeleteDocumentServer{stream}) -} - -type Index_DeleteDocumentServer interface { - SendAndClose(*DeleteDocumentResponse) error - Recv() (*DeleteDocumentRequest, error) - grpc.ServerStream + if interceptor == nil { + return srv.(IndexServer).Index(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Index", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Index(ctx, req.(*IndexRequest)) + } + return interceptor(ctx, in, info, handler) } -type indexDeleteDocumentServer struct { - grpc.ServerStream +func _Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *indexDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { - return x.ServerStream.SendMsg(m) +func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *indexDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { - m := new(DeleteDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil + if interceptor == nil { + return srv.(IndexServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return 
interceptor(ctx, in, info, handler) } func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -1716,8 +2004,24 @@ var _Index_serviceDesc = grpc.ServiceDesc{ Handler: _Index_ClusterInfo_Handler, }, { - MethodName: "GetDocument", - Handler: _Index_GetDocument_Handler, + MethodName: "Get", + Handler: _Index_Get_Handler, + }, + { + MethodName: "Index", + Handler: _Index_Index_Handler, + }, + { + MethodName: "Delete", + Handler: _Index_Delete_Handler, + }, + { + MethodName: "BulkIndex", + Handler: _Index_BulkIndex_Handler, + }, + { + MethodName: "BulkDelete", + Handler: _Index_BulkDelete_Handler, }, { MethodName: "Search", @@ -1742,16 +2046,6 @@ var _Index_serviceDesc = grpc.ServiceDesc{ Handler: _Index_ClusterWatch_Handler, ServerStreams: true, }, - { - StreamName: "IndexDocument", - Handler: _Index_IndexDocument_Handler, - ClientStreams: true, - }, - { - StreamName: "DeleteDocument", - Handler: _Index_DeleteDocument_Handler, - ClientStreams: true, - }, }, Metadata: "protobuf/index/index.proto", } diff --git a/protobuf/index/index.pb.gw.go b/protobuf/index/index.pb.gw.go new file mode 100644 index 0000000..a54291a --- /dev/null +++ b/protobuf/index/index.pb.gw.go @@ -0,0 +1,510 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/index/index.proto + +/* +Package index is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package index + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Index_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Index_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Index_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { 
+ var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Index_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Index_1(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata 
+ + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return 
nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but +// automatically dials to "endpoint" and 
closes the connection when "ctx" gets done. +func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterIndexHandler(ctx, mux, conn) +} + +// RegisterIndexHandler registers the http handlers for service Index to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) +} + +// RegisterIndexHandlerClient registers the http handlers for service Index +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "IndexClient" to call the correct interceptors. 
+func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { + + mux.Handle("GET", pattern_Index_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Index_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Index_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Index_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Index_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Index_NodeInfo_0 = runtime.ForwardResponseMessage + + forward_Index_ClusterInfo_0 = runtime.ForwardResponseMessage + + forward_Index_Get_0 = runtime.ForwardResponseMessage + + 
forward_Index_Index_0 = runtime.ForwardResponseMessage + + forward_Index_Index_1 = runtime.ForwardResponseMessage + + forward_Index_Delete_0 = runtime.ForwardResponseMessage + + forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Index_Search_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index 0943a3b..5dee6a8 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -16,24 +16,71 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; package index; option go_package = "github.com/mosuka/blast/protobuf/index"; service Index { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { + option (google.api.http) = { + get: "/v1/node/status" + }; + } rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { + option (google.api.http) = { + get: "/v1/cluster/status" + }; + } rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} - rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} - rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} - rpc Search (SearchRequest) returns (SearchResponse) {} + rpc Get (GetRequest) returns 
(GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Index (IndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + additional_bindings { + put: "/v1/documents/{id=**}" + body: "*" + } + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/bulk" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/bulk" + body: "*" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} @@ -41,28 +88,31 @@ service Index { message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } message Metadata { string grpc_address = 1; - string http_address = 2; + string grpc_gateway_address = 2; + string http_address = 3; } message Node { @@ -111,32 +161,42 @@ message ClusterWatchResponse { Cluster cluster = 3; } -message Document { +message GetRequest { + string id = 1; +} + +message GetResponse { +// Document document = 1; + google.protobuf.Any fields = 1; +} + +message IndexRequest { string id = 1; google.protobuf.Any fields = 2; } 
-message GetDocumentRequest { +message DeleteRequest { string id = 1; } -message GetDocumentResponse { - Document document = 1; +message Document { + string id = 1; + google.protobuf.Any fields = 2; } -message IndexDocumentRequest { - Document document = 1; +message BulkIndexRequest { + repeated Document documents = 1; } -message IndexDocumentResponse { +message BulkIndexResponse { int32 count = 1; } -message DeleteDocumentRequest { - string id = 1; +message BulkDeleteRequest { + repeated string ids = 1; } -message DeleteDocumentResponse { +message BulkDeleteResponse { int32 count = 1; } @@ -162,3 +222,20 @@ message GetIndexStatsResponse { google.protobuf.Any index_stats = 1; } +message Proposal { + enum Event { + UNKNOWN = 0; + SET_NODE = 1; + DELETE_NODE = 2; + INDEX = 3; + DELETE = 4; + BULK_INDEX = 5; + BULK_DELETE = 6; + } + Event event = 1; + Node node = 2; + Document document = 3; + string id = 4; + repeated Document documents = 5; + repeated string ids = 6; +} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index a2554fb..430c2e5 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -9,7 +9,10 @@ import ( proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +25,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +61,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + 
NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -179,7 +188,41 @@ func (x WatchResponse_Command) String() string { } func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15, 0} + return fileDescriptor_5e030ad796566078, []int{16, 0} +} + +type Proposal_Event int32 + +const ( + Proposal_UNKNOWN Proposal_Event = 0 + Proposal_SET_NODE Proposal_Event = 1 + Proposal_DELETE_NODE Proposal_Event = 2 + Proposal_SET_VALUE Proposal_Event = 3 + Proposal_DELETE_VALUE Proposal_Event = 4 +) + +var Proposal_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_NODE", + 2: "DELETE_NODE", + 3: "SET_VALUE", + 4: "DELETE_VALUE", +} + +var Proposal_Event_value = map[string]int32{ + "UNKNOWN": 0, + "SET_NODE": 1, + "DELETE_NODE": 2, + "SET_VALUE": 3, + "DELETE_VALUE": 4, +} + +func (x Proposal_Event) String() string { + return proto.EnumName(Proposal_Event_name, int32(x)) +} + +func (Proposal_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{17, 0} } type NodeHealthCheckRequest struct { @@ -218,7 +261,7 @@ func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse 
struct { @@ -257,12 +300,13 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` + HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -300,6 +344,13 @@ func (m *Metadata) GetGrpcAddress() string { return "" } +func (m *Metadata) GetGrpcGatewayAddress() string { + if m != nil { + return m.GrpcGatewayAddress + } + return "" +} + func (m *Metadata) GetHttpAddress() string { if m != nil { return m.HttpAddress @@ -620,6 +671,53 @@ func (m *ClusterWatchResponse) GetCluster() *Cluster { return nil } +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{10} +} + +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValue.Unmarshal(m, b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return xxx_messageInfo_KeyValue.Size(m) +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + type GetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -631,7 +729,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} + return fileDescriptor_5e030ad796566078, []int{11} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { @@ -670,7 +768,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} + return fileDescriptor_5e030ad796566078, []int{12} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { @@ -710,7 +808,7 @@ func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} + return fileDescriptor_5e030ad796566078, []int{13} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { @@ -756,7 +854,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) 
String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} + return fileDescriptor_5e030ad796566078, []int{14} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -795,7 +893,7 @@ func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (m *WatchRequest) String() string { return proto.CompactTextString(m) } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} + return fileDescriptor_5e030ad796566078, []int{15} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { @@ -836,7 +934,7 @@ func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15} + return fileDescriptor_5e030ad796566078, []int{16} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { @@ -878,12 +976,68 @@ func (m *WatchResponse) GetValue() *any.Any { return nil } +type Proposal struct { + Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.Proposal_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + KeyValue *KeyValue `protobuf:"bytes,3,opt,name=key_value,json=keyValue,proto3" json:"key_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{17} +} + +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Proposal.Unmarshal(m, b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return xxx_messageInfo_Proposal.Size(m) +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetEvent() Proposal_Event { + if m != nil { + return m.Event + } + return Proposal_UNKNOWN +} + +func (m *Proposal) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *Proposal) GetKeyValue() *KeyValue { + if m != nil { + return m.KeyValue + } + return nil +} + func init() { proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterEnum("management.Node_State", Node_State_name, Node_State_value) proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) + proto.RegisterEnum("management.Proposal_Event", Proposal_Event_name, Proposal_Event_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") proto.RegisterType((*Metadata)(nil), "management.Metadata") @@ -895,12 +1049,14 @@ func init() { proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") 
proto.RegisterType((*ClusterWatchResponse)(nil), "management.ClusterWatchResponse") + proto.RegisterType((*KeyValue)(nil), "management.KeyValue") proto.RegisterType((*GetRequest)(nil), "management.GetRequest") proto.RegisterType((*GetResponse)(nil), "management.GetResponse") proto.RegisterType((*SetRequest)(nil), "management.SetRequest") proto.RegisterType((*DeleteRequest)(nil), "management.DeleteRequest") proto.RegisterType((*WatchRequest)(nil), "management.WatchRequest") proto.RegisterType((*WatchResponse)(nil), "management.WatchResponse") + proto.RegisterType((*Proposal)(nil), "management.Proposal") } func init() { @@ -908,68 +1064,82 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 963 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xed, 0x6e, 0xdb, 0x36, - 0x14, 0xb5, 0x2c, 0x2b, 0x76, 0xae, 0x93, 0x56, 0x60, 0x8b, 0x34, 0xf1, 0x86, 0x2e, 0xe1, 0xba, - 0x22, 0x5b, 0x57, 0xa7, 0xf0, 0x56, 0x2c, 0xeb, 0xba, 0x0f, 0x35, 0xd2, 0x62, 0xa7, 0xaa, 0x1c, - 0xc8, 0x4e, 0x83, 0x0e, 0x03, 0x0a, 0xd9, 0x62, 0x6d, 0x23, 0xb6, 0xe4, 0x59, 0x74, 0x80, 0x3c, - 0xc3, 0x06, 0xec, 0x4d, 0xf6, 0x77, 0xaf, 0xb3, 0x5f, 0x7b, 0x8e, 0x82, 0x22, 0x25, 0x4b, 0x8a, - 0xec, 0xe4, 0x9f, 0x79, 0x79, 0xce, 0xe5, 0xb9, 0x87, 0xf7, 0x52, 0x86, 0x47, 0xd3, 0x99, 0x4f, - 0xfd, 0xde, 0xfc, 0xc3, 0xc1, 0xc4, 0xf1, 0x9c, 0x01, 0x99, 0x10, 0x8f, 0x26, 0x7e, 0xd6, 0xc3, - 0x6d, 0x04, 0x8b, 0x48, 0x6d, 0x67, 0xe0, 0xfb, 0x83, 0x31, 0x39, 0x88, 0x89, 0x8e, 0x77, 0xc5, - 0x61, 0xb5, 0x4f, 0xb2, 0x5b, 0x64, 0x32, 0xa5, 0x62, 0x13, 0xff, 0x2d, 0xc1, 0x96, 0xe5, 0xbb, - 0xa4, 0x49, 0x9c, 0x31, 0x1d, 0x1e, 0x0d, 0x49, 0xff, 0xc2, 0x26, 0x7f, 0xcc, 0x49, 0x40, 0xd1, - 0x4f, 0xa0, 0x4c, 0x67, 0x7e, 0x8f, 0x6c, 0x4b, 0xbb, 0xd2, 0xfe, 0x9d, 0xc6, 0x7e, 0x3d, 0x21, - 0x20, 0x9f, 0x52, 0x3f, 0x65, 0x78, 0x9b, 0xd3, 0xf0, 0x73, 0x50, 0xc2, 0x35, 0xba, 0x0b, 0xd5, - 0xa6, 0xa1, 0x99, 0xdd, 0x66, 0xcb, 0x32, 0x3a, 0x1d, 0xb5, 
0x80, 0x36, 0xa0, 0x62, 0xb6, 0xde, - 0x1a, 0xe1, 0x4a, 0x42, 0x9b, 0xb0, 0x6e, 0x1b, 0x9a, 0xce, 0x37, 0x8b, 0xf8, 0x1f, 0x09, 0x1e, - 0x5c, 0x4b, 0x1f, 0x4c, 0x7d, 0x2f, 0x20, 0xe8, 0x67, 0x50, 0x02, 0xea, 0xd0, 0x48, 0xd2, 0x97, - 0x2b, 0x25, 0x71, 0x4e, 0xbd, 0xc3, 0x08, 0x36, 0xe7, 0x61, 0x1b, 0x94, 0x70, 0x8d, 0xaa, 0x50, - 0xe6, 0x9a, 0xde, 0xa9, 0x05, 0xa6, 0xe0, 0xcc, 0x8a, 0x96, 0x12, 0x5a, 0x07, 0x45, 0x63, 0xfa, - 0xd4, 0x22, 0xaa, 0x40, 0x49, 0x37, 0x34, 0x5d, 0x95, 0x59, 0x90, 0xa9, 0x7c, 0xa7, 0x96, 0x18, - 0xdc, 0x6a, 0x77, 0xdf, 0xf3, 0xa5, 0x82, 0x4f, 0xa1, 0xf2, 0x86, 0x50, 0xc7, 0x75, 0xa8, 0x83, - 0xf6, 0x60, 0x63, 0x30, 0x9b, 0xf6, 0xdf, 0x3b, 0xae, 0x3b, 0x23, 0x41, 0x10, 0xea, 0x5c, 0xb7, - 0xab, 0x2c, 0xa6, 0xf1, 0x10, 0x83, 0x0c, 0x29, 0x9d, 0xc6, 0x90, 0x22, 0x87, 0xb0, 0x98, 0x80, - 0xe0, 0xff, 0x25, 0x28, 0xb1, 0x72, 0xd0, 0x1d, 0x28, 0x8e, 0x5c, 0x91, 0xa4, 0x38, 0x72, 0x19, - 0xb7, 0x37, 0xf2, 0xdc, 0x2c, 0x97, 0xc5, 0xa2, 0xf4, 0x5f, 0x47, 0x16, 0xc9, 0xa1, 0x45, 0x5b, - 0x59, 0x8b, 0x52, 0x7e, 0xa0, 0x67, 0x50, 0x99, 0x08, 0xed, 0xdb, 0xa5, 0x5d, 0x69, 0xbf, 0xda, - 0xb8, 0x9f, 0x24, 0x44, 0x75, 0xd9, 0x31, 0x0a, 0xbf, 0x4e, 0x38, 0x78, 0x66, 0xbd, 0xb6, 0xda, - 0xe7, 0x16, 0xbf, 0xd1, 0x5f, 0xdb, 0xa6, 0xd9, 0x3e, 0x37, 0x6c, 0x7e, 0xa3, 0x47, 0x9a, 0xa5, - 0xb7, 0x74, 0xad, 0xcb, 0x4c, 0x04, 0x58, 0x33, 0x0d, 0x4d, 0x37, 0x6c, 0x55, 0x66, 0xc0, 0x4e, - 0xf3, 0xac, 0xab, 0x33, 0x5a, 0x09, 0xff, 0x29, 0x41, 0xf9, 0x68, 0x3c, 0x0f, 0x28, 0x99, 0xa1, - 0x6f, 0x41, 0xf1, 0x7c, 0x97, 0x30, 0xcf, 0xe4, 0xfd, 0x6a, 0xe3, 0x61, 0x52, 0x87, 0xc0, 0x84, - 0x05, 0x04, 0x86, 0x47, 0x67, 0x57, 0x36, 0x07, 0xd7, 0x4e, 0x00, 0x16, 0x41, 0xa4, 0x82, 0x7c, - 0x41, 0xae, 0x84, 0x61, 0xec, 0x27, 0x7a, 0x0c, 0xca, 0xa5, 0x33, 0x9e, 0x93, 0xd0, 0xaa, 0x6a, - 0x43, 0xcd, 0xda, 0x61, 0xf3, 0xed, 0x17, 0xc5, 0x43, 0x09, 0x1f, 0x82, 0xca, 0x42, 0x2d, 0xef, - 0x83, 0x1f, 0x77, 0xdc, 0x23, 0x28, 0xb1, 0x83, 0xc2, 0x94, 0x79, 0xf4, 0x70, 0x17, 0xbf, 0x00, - 
0x24, 0x24, 0x9e, 0xf8, 0x23, 0x2f, 0x1a, 0xa0, 0xdb, 0x71, 0xbf, 0x80, 0x7b, 0x82, 0x6b, 0x12, - 0xe7, 0x92, 0x44, 0xe4, 0xcc, 0xd5, 0x63, 0x3d, 0x86, 0xa5, 0xf4, 0x3d, 0x85, 0x72, 0x9f, 0x87, - 0xc5, 0x31, 0xf7, 0x72, 0x7c, 0xb3, 0x23, 0x0c, 0xfe, 0x4f, 0x82, 0xfb, 0x22, 0x78, 0xee, 0xd0, - 0xfe, 0x30, 0xce, 0xf3, 0x12, 0x14, 0x72, 0x49, 0x3c, 0x2a, 0x26, 0xeb, 0x71, 0x4e, 0x96, 0x14, - 0xa1, 0x6e, 0x30, 0xb4, 0xcd, 0x49, 0x71, 0xa5, 0xc5, 0x55, 0x95, 0x26, 0xb5, 0xca, 0xb7, 0xd0, - 0xfa, 0x1c, 0x94, 0xf0, 0x90, 0x74, 0xa7, 0x55, 0xa0, 0x74, 0xd2, 0x6e, 0x59, 0x7c, 0x4c, 0x4d, - 0x43, 0x7b, 0x2b, 0x3a, 0xec, 0xec, 0x34, 0xec, 0x36, 0x19, 0x3f, 0x04, 0x38, 0x26, 0x34, 0xb2, - 0xf1, 0x5a, 0x47, 0xe0, 0xef, 0xa1, 0x1a, 0xee, 0x8b, 0xc2, 0xbf, 0x8a, 0x1a, 0x44, 0x12, 0xed, - 0xcf, 0x5f, 0xcb, 0x7a, 0xf4, 0x5a, 0xd6, 0x35, 0xef, 0x4a, 0x34, 0x09, 0x3e, 0x01, 0xe8, 0xac, - 0x48, 0xbd, 0xc8, 0x55, 0xbc, 0x39, 0xd7, 0x1e, 0x6c, 0xea, 0x64, 0x4c, 0x28, 0x59, 0xae, 0x74, - 0x17, 0x36, 0x84, 0xe7, 0xcb, 0x10, 0xff, 0x4a, 0xb0, 0x99, 0xbe, 0xc7, 0x1f, 0xa0, 0xdc, 0xf7, - 0x27, 0x13, 0xc7, 0x73, 0xc5, 0x4d, 0xee, 0x25, 0x3d, 0x4e, 0x5f, 0xe1, 0x11, 0x07, 0xda, 0x11, - 0x23, 0x3a, 0xa0, 0x98, 0x53, 0x91, 0x7c, 0x73, 0x45, 0x4f, 0xa0, 0x2c, 0x32, 0xa6, 0x6f, 0xac, - 0x0c, 0x72, 0xc7, 0xe8, 0xaa, 0x12, 0xbb, 0x25, 0xdd, 0x30, 0x0d, 0xf6, 0x26, 0x34, 0xfe, 0x5a, - 0x03, 0x78, 0x13, 0x0b, 0x43, 0xbf, 0xc3, 0xdd, 0xcc, 0xfb, 0x8d, 0xf0, 0xcd, 0xdf, 0x9b, 0xda, - 0xe7, 0xb7, 0xf8, 0x00, 0xe0, 0x02, 0x7a, 0x05, 0x95, 0x68, 0xb0, 0xd1, 0xd6, 0xb5, 0x12, 0x0c, - 0xf6, 0x39, 0xac, 0x7d, 0x9a, 0x4d, 0x95, 0x1c, 0x33, 0x5c, 0x40, 0xc7, 0x50, 0x4d, 0x8c, 0x38, - 0xca, 0x7b, 0x9e, 0x12, 0xb3, 0x5f, 0x5b, 0x72, 0x0c, 0x2e, 0xa0, 0x16, 0x6c, 0x24, 0xe7, 0x1d, - 0x7d, 0x96, 0x93, 0x29, 0xf9, 0x12, 0xac, 0x48, 0xd5, 0x8c, 0x35, 0xad, 0x2c, 0x2d, 0xef, 0x84, - 0x4c, 0x75, 0x66, 0x2c, 0x2a, 0x6c, 0x91, 0xa5, 0xa9, 0x76, 0x6f, 0x7a, 0x17, 0x70, 0xe1, 0x99, - 0x84, 0x0e, 0x41, 0x3e, 0x26, 0x14, 
0xa5, 0xbe, 0x3d, 0x8b, 0x99, 0xac, 0x3d, 0xb8, 0x16, 0x8f, - 0x75, 0x7c, 0x07, 0x72, 0x27, 0xcb, 0x5c, 0x8c, 0xdc, 0x0a, 0x2b, 0x7e, 0x84, 0x35, 0x3e, 0x4e, - 0x68, 0x27, 0xc9, 0x4d, 0x8d, 0xd8, 0x0a, 0xfa, 0x2f, 0xa0, 0xf0, 0xc2, 0xb7, 0x73, 0xc6, 0x85, - 0x93, 0x77, 0x96, 0x0e, 0x52, 0x58, 0xf3, 0x4b, 0xa8, 0x74, 0x3c, 0x67, 0x1a, 0x0c, 0x7d, 0xba, - 0xd4, 0xbd, 0xa5, 0xe7, 0xbf, 0x7a, 0xfa, 0xdb, 0x93, 0xc1, 0x88, 0x0e, 0xe7, 0xbd, 0x7a, 0xdf, - 0x9f, 0x1c, 0x4c, 0xfc, 0x60, 0x7e, 0xe1, 0x1c, 0xf4, 0xc6, 0x4e, 0x40, 0x0f, 0x72, 0xfe, 0x0a, - 0xf6, 0xd6, 0xc2, 0xe0, 0x37, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xdd, 0x42, 0x6a, 0x28, - 0x0a, 0x00, 0x00, + // 1193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x72, 0xda, 0x56, + 0x14, 0x8e, 0x10, 0x18, 0x7c, 0x20, 0xb1, 0x72, 0xcd, 0xf8, 0x87, 0x7a, 0x52, 0x5b, 0x4d, 0x33, + 0xae, 0xd3, 0x80, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x2c, 0xd5, 0xc6, 0x26, 0xe0, 0x0a, 0x6c, + 0x8f, 0xbb, 0xf1, 0x5c, 0xe0, 0x06, 0x18, 0x40, 0xa2, 0xe8, 0xe2, 0x96, 0xe9, 0x74, 0x93, 0x6d, + 0x97, 0xdd, 0xf6, 0x3d, 0xba, 0xc8, 0x63, 0xf4, 0x05, 0xba, 0xe8, 0x74, 0xd3, 0x97, 0xe8, 0xdc, + 0x1f, 0xc9, 0x12, 0x16, 0xb6, 0xdb, 0x9d, 0x74, 0xce, 0x77, 0xbe, 0xf3, 0x9d, 0x73, 0x8f, 0xce, + 0x05, 0x78, 0x3c, 0x1c, 0x39, 0xd4, 0x69, 0x8c, 0x5f, 0x17, 0x06, 0xd8, 0xc6, 0x6d, 0x32, 0x20, + 0x36, 0x0d, 0x3c, 0xe6, 0xb9, 0x1b, 0xc1, 0x95, 0x25, 0xb7, 0xda, 0x76, 0x9c, 0x76, 0x9f, 0x14, + 0xfc, 0x40, 0x6c, 0x4f, 0x04, 0x2c, 0xf7, 0xce, 0xb4, 0x8b, 0x0c, 0x86, 0xd4, 0x73, 0xae, 0x49, + 0x27, 0x1e, 0x76, 0x0b, 0xd8, 0xb6, 0x1d, 0x8a, 0x69, 0xd7, 0xb1, 0x5d, 0xe1, 0xd5, 0x7f, 0x53, + 0x60, 0xa9, 0xe2, 0xb4, 0xc8, 0x01, 0xc1, 0x7d, 0xda, 0xd9, 0xeb, 0x90, 0x66, 0xcf, 0x22, 0xdf, + 0x8f, 0x89, 0x4b, 0xd1, 0x97, 0x90, 0x18, 0x8e, 0x9c, 0x06, 0x59, 0x51, 0xd6, 0x95, 0xcd, 0x07, + 0x3b, 0x9b, 0xf9, 0x80, 0xbc, 0xe8, 0x90, 0xfc, 0x31, 0xc3, 0x5b, 0x22, 0x4c, 0x7f, 0x09, 0x09, + 0xfe, 
0x8e, 0xd2, 0x90, 0x3c, 0xa9, 0x1c, 0x55, 0xaa, 0x67, 0x15, 0xed, 0x1e, 0x5a, 0x80, 0xf4, + 0x81, 0x59, 0x2c, 0xd7, 0x0f, 0x4a, 0x15, 0xb3, 0x56, 0xd3, 0x14, 0x94, 0x81, 0x54, 0xb9, 0x74, + 0x6a, 0xf2, 0xb7, 0x18, 0xba, 0x0f, 0xf3, 0x96, 0x59, 0x34, 0x84, 0x53, 0xd5, 0xdf, 0x2a, 0xb0, + 0x7c, 0x2d, 0x97, 0x3b, 0x74, 0x6c, 0x97, 0xa0, 0xaf, 0x20, 0xe1, 0x52, 0x4c, 0x3d, 0x7d, 0x1f, + 0xdc, 0xa8, 0x4f, 0xc4, 0xe4, 0x6b, 0x2c, 0xc0, 0x12, 0x71, 0xfa, 0x05, 0x24, 0xf8, 0x7b, 0x58, + 0x60, 0x1a, 0x92, 0x42, 0xe0, 0xb9, 0xa6, 0x30, 0x39, 0x27, 0x15, 0xef, 0x35, 0x86, 0xe6, 0x21, + 0x51, 0x64, 0x62, 0x35, 0x15, 0xa5, 0x20, 0x6e, 0x98, 0x45, 0x43, 0x8b, 0x33, 0x23, 0x93, 0x7c, + 0xae, 0x25, 0x18, 0xbc, 0x52, 0xad, 0x5f, 0x88, 0xd7, 0x39, 0xfd, 0x8d, 0x02, 0xa9, 0x57, 0x84, + 0xe2, 0x16, 0xa6, 0x18, 0x6d, 0x40, 0xa6, 0x3d, 0x1a, 0x36, 0x2f, 0x70, 0xab, 0x35, 0x22, 0xae, + 0xcb, 0x55, 0xcf, 0x5b, 0x69, 0x66, 0x2b, 0x0a, 0x13, 0xda, 0x86, 0x2c, 0x87, 0xb4, 0x31, 0x25, + 0x3f, 0xe0, 0x89, 0x0f, 0x8d, 0x71, 0x28, 0x62, 0xbe, 0x7d, 0xe1, 0xf2, 0x22, 0x36, 0x20, 0xd3, + 0xa1, 0x74, 0xe8, 0x23, 0x55, 0x41, 0xca, 0x6c, 0x12, 0xa2, 0xff, 0xad, 0x40, 0x9c, 0xb5, 0x03, + 0x3d, 0x80, 0x58, 0xb7, 0x25, 0xd3, 0xc6, 0xba, 0x2d, 0x16, 0xdb, 0xe8, 0xda, 0xad, 0xa9, 0x2c, + 0x69, 0x66, 0xf3, 0xe8, 0x3f, 0xf4, 0x5a, 0xac, 0xf2, 0x16, 0x2f, 0x4d, 0xb7, 0x38, 0xd4, 0x4f, + 0xb4, 0x0d, 0xa9, 0x81, 0xac, 0x76, 0x25, 0xbe, 0xae, 0x6c, 0xa6, 0x77, 0xb2, 0xc1, 0x00, 0xaf, + 0x13, 0x96, 0x8f, 0xd2, 0x8f, 0x22, 0x4f, 0x20, 0x03, 0xa9, 0x6f, 0xaa, 0xe5, 0x72, 0xf5, 0xcc, + 0xb4, 0xc4, 0x11, 0xec, 0x15, 0x2b, 0x46, 0xc9, 0x28, 0xd6, 0x4d, 0x2d, 0x86, 0x00, 0xe6, 0xca, + 0x66, 0xd1, 0x30, 0x2d, 0x4d, 0x65, 0xc0, 0xda, 0xc1, 0x49, 0xdd, 0x60, 0x61, 0x71, 0xfd, 0x17, + 0x05, 0x92, 0x7b, 0xfd, 0xb1, 0x4b, 0xc9, 0x08, 0x7d, 0x0c, 0x09, 0xdb, 0x69, 0x11, 0xd6, 0x65, + 0x75, 0x33, 0xbd, 0xf3, 0x28, 0xa8, 0x43, 0x62, 0x78, 0x01, 0xae, 0x69, 0xd3, 0xd1, 0xc4, 0x12, + 0xe0, 0xdc, 0x21, 0xc0, 0x95, 0x11, 0x69, 
0xa0, 0xf6, 0xc8, 0x44, 0x36, 0x8c, 0x3d, 0xa2, 0x27, + 0x90, 0xb8, 0xc4, 0xfd, 0x31, 0xe1, 0xad, 0x4a, 0xef, 0x68, 0xd3, 0xed, 0xb0, 0x84, 0x7b, 0x37, + 0xf6, 0x42, 0xd1, 0x5f, 0x80, 0xc6, 0x4c, 0x25, 0xfb, 0xb5, 0xe3, 0x4f, 0xec, 0x63, 0x88, 0xb3, + 0x44, 0x9c, 0x32, 0x2a, 0x9c, 0x7b, 0xf5, 0x5d, 0x40, 0x52, 0xe2, 0xa1, 0xd3, 0xb5, 0xbd, 0xaf, + 0xf1, 0x6e, 0xb1, 0xef, 0xc3, 0xa2, 0x8c, 0x2d, 0x13, 0x7c, 0x49, 0xbc, 0xe0, 0xa9, 0xa3, 0xd7, + 0x0d, 0x1f, 0x16, 0xd2, 0xf7, 0x0c, 0x92, 0x4d, 0x61, 0x96, 0x69, 0x16, 0x23, 0xfa, 0x66, 0x79, + 0x18, 0xfd, 0x4f, 0x05, 0xb2, 0xd2, 0x78, 0x86, 0x69, 0xb3, 0xe3, 0xf3, 0x7c, 0x0e, 0x09, 0x72, + 0x49, 0x6c, 0x2a, 0xbf, 0xcc, 0x27, 0x11, 0x2c, 0xa1, 0x80, 0xbc, 0xc9, 0xd0, 0x96, 0x08, 0xf2, + 0x2b, 0x8d, 0xdd, 0x54, 0x69, 0x50, 0xab, 0x7a, 0x07, 0xad, 0x9f, 0x40, 0x82, 0x27, 0x09, 0x4f, + 0x5a, 0x0a, 0xe2, 0x87, 0xd5, 0x52, 0x45, 0x53, 0xd8, 0x47, 0x5c, 0x36, 0x8b, 0xa7, 0x72, 0xc2, + 0x4e, 0x8e, 0xf9, 0xb4, 0xa9, 0xfa, 0x01, 0xa4, 0x8e, 0xc8, 0xe4, 0x94, 0x9d, 0x6a, 0xc4, 0x3c, + 0x6c, 0x85, 0xe7, 0x21, 0x9b, 0x17, 0xab, 0x36, 0xef, 0xed, 0xe1, 0x7c, 0xd1, 0x9e, 0xc8, 0x99, + 0xd0, 0x1f, 0x01, 0xec, 0x13, 0xea, 0x1d, 0xc8, 0x35, 0x2e, 0xfd, 0x53, 0x48, 0x73, 0xbf, 0x6c, + 0xa1, 0x4f, 0xad, 0xdc, 0x4e, 0x7d, 0x08, 0x50, 0xbb, 0x81, 0xfa, 0x3f, 0xc9, 0xdc, 0x80, 0xfb, + 0x06, 0xe9, 0x13, 0x4a, 0x66, 0x2b, 0x5d, 0x87, 0x8c, 0x3c, 0xbd, 0x59, 0x88, 0xdf, 0x15, 0xb8, + 0x1f, 0x9e, 0x88, 0xcf, 0x20, 0xd9, 0x74, 0x06, 0x03, 0x6c, 0xb7, 0xe4, 0x4c, 0x6c, 0x04, 0x4f, + 0x2b, 0x3c, 0x0c, 0x7b, 0x02, 0x68, 0x79, 0x11, 0x5e, 0x82, 0x58, 0x44, 0x45, 0xea, 0xed, 0x15, + 0x3d, 0x85, 0xa4, 0x64, 0x0c, 0x9f, 0x7d, 0x12, 0xd4, 0x9a, 0x59, 0xd7, 0x14, 0x76, 0xde, 0x86, + 0x59, 0x36, 0xd9, 0x76, 0xd1, 0xff, 0x51, 0x20, 0x75, 0x3c, 0x72, 0x86, 0x8e, 0x8b, 0xfb, 0x68, + 0x3b, 0x3c, 0xc6, 0xb9, 0xa0, 0x64, 0x0f, 0xf4, 0x7f, 0x46, 0xf7, 0x39, 0xcc, 0xf7, 0xc8, 0xe4, + 0x22, 0x5c, 0x41, 0x00, 0xea, 0x4d, 0x9c, 0x95, 0xea, 0xc9, 0x27, 0xbd, 0x1e, 
0x39, 0xbe, 0x6c, + 0xff, 0x99, 0xf5, 0x8b, 0x4a, 0xd5, 0x30, 0x35, 0x85, 0xdd, 0xac, 0xa2, 0x0e, 0x61, 0xe0, 0x77, + 0x29, 0x73, 0x9f, 0x16, 0xcb, 0x27, 0xec, 0xc6, 0xd2, 0x20, 0x23, 0xfd, 0xc2, 0x12, 0xdf, 0x79, + 0x9b, 0x04, 0x78, 0xe5, 0xe7, 0x45, 0x3f, 0xc2, 0xc2, 0xd4, 0xbd, 0x89, 0xf4, 0xdb, 0x2f, 0xfd, + 0xdc, 0x7b, 0x77, 0xb8, 0x78, 0xf5, 0xb5, 0x37, 0x7f, 0xfc, 0xf5, 0x6b, 0x6c, 0x09, 0x65, 0x0b, + 0x97, 0xcf, 0x0b, 0xac, 0x0b, 0x85, 0x0e, 0x47, 0x35, 0x79, 0x9a, 0x73, 0x48, 0x79, 0xcb, 0x12, + 0x2d, 0x5d, 0x3b, 0x4c, 0x93, 0xfd, 0x9a, 0xc9, 0xad, 0x4d, 0xa7, 0x09, 0xae, 0x2e, 0x7d, 0x99, + 0xf3, 0x3f, 0x44, 0x0b, 0x3e, 0x3f, 0xbb, 0x93, 0xc6, 0x2e, 0xda, 0x87, 0x74, 0x60, 0x9b, 0xa2, + 0xa8, 0x9b, 0x20, 0xb0, 0x66, 0x73, 0x33, 0xb2, 0xeb, 0xf7, 0x50, 0x09, 0x32, 0xc1, 0xd5, 0x8a, + 0xde, 0x8d, 0x60, 0x0a, 0x2e, 0xdd, 0x1b, 0xa8, 0x1a, 0xbe, 0xa6, 0x1b, 0x2b, 0x8e, 0xca, 0x10, + 0x2a, 0x3a, 0xc7, 0x8b, 0xce, 0x22, 0xc4, 0x8a, 0x96, 0x9b, 0xce, 0xab, 0xbb, 0xec, 0xcb, 0xe5, + 0x5f, 0xd7, 0xcc, 0x24, 0xeb, 0xb7, 0x2d, 0x67, 0xfd, 0xde, 0xb6, 0x82, 0xbe, 0x05, 0x75, 0x9f, + 0x50, 0x14, 0xfa, 0x01, 0x70, 0xb5, 0xce, 0x72, 0xcb, 0xd7, 0xec, 0x32, 0x76, 0x95, 0x2b, 0x5c, + 0x44, 0x0f, 0x99, 0x42, 0x76, 0xe5, 0x17, 0x7e, 0xea, 0x91, 0xc9, 0x17, 0x5b, 0x5b, 0x3f, 0xa3, + 0x1a, 0xa8, 0xb5, 0x69, 0xca, 0xab, 0x35, 0x36, 0xb3, 0x7b, 0x72, 0x90, 0x72, 0xd7, 0x19, 0x77, + 0x95, 0x2d, 0x74, 0x0a, 0x73, 0x62, 0x7d, 0xa1, 0xd5, 0x20, 0x6f, 0x68, 0xa5, 0xcd, 0xa4, 0x96, + 0x62, 0xb7, 0x22, 0xc4, 0x7e, 0x0d, 0x09, 0xd1, 0xc6, 0x95, 0x88, 0xbd, 0x25, 0x58, 0x57, 0x67, + 0x6e, 0x34, 0xde, 0xc1, 0x63, 0x48, 0xd5, 0x6c, 0x3c, 0x74, 0x3b, 0x0e, 0x9d, 0x79, 0x16, 0xb3, + 0x84, 0x65, 0xb9, 0xb0, 0x07, 0x28, 0xc3, 0x84, 0xb9, 0x92, 0xe5, 0xe5, 0xb3, 0xef, 0x9e, 0xb6, + 0xbb, 0xb4, 0x33, 0x6e, 0xe4, 0x9b, 0xce, 0xa0, 0x30, 0x70, 0xdc, 0x71, 0x0f, 0x17, 0x1a, 0x7d, + 0xec, 0xd2, 0x42, 0xc4, 0x9f, 0x8b, 0xc6, 0x1c, 0x37, 0x7e, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x57, 0x7b, 
0x42, 0x0c, 0x7a, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1165,6 +1335,44 @@ type ManagementServer interface { Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } +// UnimplementedManagementServer can be embedded to have forward compatible implementations. +type UnimplementedManagementServer struct { +} + +func (*UnimplementedManagementServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedManagementServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") +} +func (*UnimplementedManagementServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") +} +func (*UnimplementedManagementServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") +} +func (*UnimplementedManagementServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (*UnimplementedManagementServer) ClusterWatch(req *empty.Empty, srv Management_ClusterWatchServer) error { + return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") +} +func (*UnimplementedManagementServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedManagementServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
Set not implemented") +} +func (*UnimplementedManagementServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedManagementServer) Watch(req *WatchRequest, srv Management_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (*UnimplementedManagementServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} + func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { s.RegisterService(&_Management_serviceDesc, srv) } diff --git a/protobuf/management/management.pb.gw.go b/protobuf/management/management.pb.gw.go new file mode 100644 index 0000000..5430218 --- /dev/null +++ b/protobuf/management/management.pb.gw.go @@ -0,0 +1,379 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/management/management.proto + +/* +Package management is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package management + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Management_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Management_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Management_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Get_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Set_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, 
parameter: %s, error: %v", "key", err) + } + + msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterManagementHandlerFromEndpoint is same as RegisterManagementHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterManagementHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterManagementHandler(ctx, mux, conn) +} + +// RegisterManagementHandler registers the http handlers for service Management to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterManagementHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterManagementHandlerClient(ctx, mux, NewManagementClient(conn)) +} + +// RegisterManagementHandlerClient registers the http handlers for service Management +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ManagementClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ManagementClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ManagementClient" to call the correct interceptors. 
+func RegisterManagementHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ManagementClient) error { + + mux.Handle("GET", pattern_Management_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Management_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Management_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Set_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Management_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Management_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Management_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 
2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Management_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Management_NodeInfo_0 = runtime.ForwardResponseMessage + + forward_Management_ClusterInfo_0 = runtime.ForwardResponseMessage + + forward_Management_Get_0 = runtime.ForwardResponseMessage + + forward_Management_Set_0 = runtime.ForwardResponseMessage + + forward_Management_Delete_0 = runtime.ForwardResponseMessage + + forward_Management_Snapshot_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 2a7d736..621603b 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -16,51 +16,84 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; package management; option go_package = "github.com/mosuka/blast/protobuf/management"; service Management { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { + option (google.api.http) = { + get: "/v1/node/status" + }; + } rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { + option (google.api.http) = { + get: "/v1/cluster/status" + }; + } rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - rpc Get (GetRequest) returns (GetResponse) {} - rpc Set (SetRequest) returns 
(google.protobuf.Empty) {} - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) {} + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/data/{key=**}" + }; + } + rpc Set (SetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/data/{key=**}" + body: "*" + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/data/{key=**}" + }; + } rpc Watch (WatchRequest) returns (stream WatchResponse) {} - rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/snapshot" + }; + } } message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } message Metadata { string grpc_address = 1; - string http_address = 2; + string grpc_gateway_address = 2; + string http_address = 3; } message Node { @@ -109,6 +142,11 @@ message ClusterWatchResponse { Cluster cluster = 3; } +message KeyValue { + string key = 1; + google.protobuf.Any value = 2; +} + message GetRequest { string key = 1; } @@ -140,3 +178,16 @@ message WatchResponse { string key = 2; google.protobuf.Any value = 3; } + +message Proposal { + enum Event { + UNKNOWN = 0; + SET_NODE = 1; + DELETE_NODE = 2; + SET_VALUE = 3; + DELETE_VALUE = 4; + } + Event event = 1; + Node node = 2; + KeyValue key_value = 3; +} From 1da2140c7cd1966b510b2f3d78eccbf5911a27cc Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 28 Aug 2019 20:14:44 +0900 Subject: [PATCH 36/76] Add swagger specification 
experimentaly (#107) --- Makefile | 6 +- go.mod | 21 +- go.sum | 21 + protobuf/distribute/distribute.swagger.json | 362 +++++++++++++ protobuf/index/index.swagger.json | 557 ++++++++++++++++++++ protobuf/management/management.pb.go | 159 +++--- protobuf/management/management.proto | 10 + protobuf/management/management.swagger.json | 409 ++++++++++++++ 8 files changed, 1459 insertions(+), 86 deletions(-) create mode 100644 protobuf/distribute/distribute.swagger.json create mode 100644 protobuf/index/index.swagger.json create mode 100644 protobuf/management/management.swagger.json diff --git a/Makefile b/Makefile index c77e6f4..f017c30 100644 --- a/Makefile +++ b/Makefile @@ -47,9 +47,9 @@ endif protoc: @echo ">> generating proto3 code" @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done -# @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done .PHONY: format format: diff --git a/go.mod b/go.mod index 99b2de5..2adb2ee 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/mosuka/blast go 1.12 require ( - cloud.google.com/go v0.43.0 // indirect + cloud.google.com/go v0.44.3 // indirect github.com/blevesearch/bleve v0.7.0 github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect @@ -24,7 +24,7 @@ require ( github.com/gorilla/mux v1.7.0 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.9.5 + github.com/grpc-ecosystem/grpc-gateway v1.9.6 github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 @@ -42,6 +42,7 @@ require ( github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect github.com/rogpeppe/fastuuid v1.2.0 // indirect + github.com/rogpeppe/go-internal v1.3.1 // indirect github.com/stretchr/objx v0.1.1 github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect @@ -50,13 +51,15 @@ require ( go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.10.0 - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // 
indirect - golang.org/x/mobile v0.0.0-20190806162312-597adff16ade // indirect - golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect - golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect - golang.org/x/tools v0.0.0-20190808195139-e713427fea3f // indirect - google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 - google.golang.org/grpc v1.22.1 + golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect + golang.org/x/image v0.0.0-20190823064033-3a9bac650e44 // indirect + golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5 // indirect + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect + golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect + golang.org/x/tools v0.0.0-20190827205025-b29f5f60c37a // indirect + google.golang.org/api v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 + google.golang.org/grpc v1.23.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.2.2 honnef.co/go/tools v0.0.1-2019.2.2 // indirect diff --git a/go.sum b/go.sum index c57d73c..1fe5def 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= 
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= @@ -116,6 +119,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.6 h1:8p0pcgLlw2iuZVsdHdPaMUXFOA+6gDixcXbHEMzSyW8= +github.com/grpc-ecosystem/grpc-gateway v1.9.6/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -206,6 +211,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -248,11 +254,13 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190823064033-3a9bac650e44/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -260,6 +268,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= +golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -279,6 +288,8 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -304,6 +315,8 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ 
-324,9 +337,13 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190827205025-b29f5f60c37a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -339,11 +356,15 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/protobuf/distribute/distribute.swagger.json b/protobuf/distribute/distribute.swagger.json new file mode 100644 index 0000000..8ddf64d --- /dev/null +++ b/protobuf/distribute/distribute.swagger.json @@ -0,0 +1,362 @@ +{ + "swagger": "2.0", + "info": { + "title": "protobuf/distribute/distribute.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/v1/bulk": { + "delete": { + "operationId": "BulkDelete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/distributeBulkDeleteResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/distributeBulkDeleteRequest" + } + } + ], + "tags": [ + "Distribute" + ] + }, + "put": { + "operationId": "BulkIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/distributeBulkIndexResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/distributeBulkIndexRequest" + } + } + ], + "tags": [ + "Distribute" + ] + } + }, + "/v1/documents": { + "put": { + "operationId": "Index", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/distributeIndexRequest" + } + } + ], + "tags": [ + "Distribute" + ] + } + }, + "/v1/documents/{id}": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/distributeGetResponse" + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Distribute" + ] + }, + "delete": { + "operationId": "Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Distribute" + ] + }, + "put": { + "operationId": "Index2", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/distributeIndexRequest" + } + } + ], + "tags": [ + "Distribute" + ] + } + }, + "/v1/node/healthcheck": { + "get": { + "operationId": "NodeHealthCheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/distributeNodeHealthCheckResponse" + } + } + }, + "parameters": [ + { + "name": "probe", + "in": "query", + "required": false, 
+ "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + } + ], + "tags": [ + "Distribute" + ] + } + }, + "/v1/search": { + "post": { + "operationId": "Search", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/distributeSearchResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/distributeSearchRequest" + } + } + ], + "tags": [ + "Distribute" + ] + } + } + }, + "definitions": { + "distributeBulkDeleteRequest": { + "type": "object", + "properties": { + "ids": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "distributeBulkDeleteResponse": { + "type": "object", + "properties": { + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "distributeBulkIndexRequest": { + "type": "object", + "properties": { + "documents": { + "type": "array", + "items": { + "$ref": "#/definitions/indexDocument" + } + } + } + }, + "distributeBulkIndexResponse": { + "type": "object", + "properties": { + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "distributeGetResponse": { + "type": "object", + "properties": { + "fields": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "distributeIndexRequest": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fields": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "distributeNodeHealthCheckRequestProbe": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + }, + "distributeNodeHealthCheckResponse": { + "type": "object", + "properties": { + "state": { + "$ref": "#/definitions/distributeNodeHealthCheckResponseState" + } + } + }, + "distributeNodeHealthCheckResponseState": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHY", + "UNHEALTHY", + "ALIVE", + "DEAD", + "READY", + 
"NOT_READY" + ], + "default": "UNKNOWN" + }, + "distributeSearchRequest": { + "type": "object", + "properties": { + "search_request": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "distributeSearchResponse": { + "type": "object", + "properties": { + "search_result": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "indexDocument": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fields": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
+ }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/protobuf/index/index.swagger.json b/protobuf/index/index.swagger.json new file mode 100644 index 0000000..5d96593 --- /dev/null +++ b/protobuf/index/index.swagger.json @@ -0,0 +1,557 @@ +{ + "swagger": "2.0", + "info": { + "title": "protobuf/index/index.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/v1/bulk": { + "delete": { + "operationId": "BulkDelete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexBulkDeleteResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/indexBulkDeleteRequest" + } + } + ], + "tags": [ + "Index" + ] + }, + "put": { + "operationId": "BulkIndex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexBulkIndexResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/indexBulkIndexRequest" + } + } + ], + "tags": [ + "Index" + ] + } + }, + "/v1/cluster/status": { + "get": { + "operationId": "ClusterInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/indexClusterInfoResponse" + } + } + }, + "tags": [ + "Index" + ] + } + }, + "/v1/documents": { + "put": { + "operationId": "Index", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/indexIndexRequest" + } + } + ], + "tags": [ + "Index" + ] + } + }, + "/v1/documents/{id}": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexGetResponse" + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Index" + ] + }, + "delete": { + "operationId": "Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Index" + ] + }, + "put": { + "operationId": "Index2", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/indexIndexRequest" + } + } + ], + "tags": [ + "Index" + ] + } + }, + "/v1/node/healthcheck": { + "get": { + "operationId": "NodeHealthCheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexNodeHealthCheckResponse" + } + } + }, + "parameters": [ + { + "name": "probe", + "in": "query", + "required": false, + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + } + ], + "tags": [ + "Index" + ] + } + }, + "/v1/node/status": { + "get": { + 
"operationId": "NodeInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexNodeInfoResponse" + } + } + }, + "tags": [ + "Index" + ] + } + }, + "/v1/search": { + "post": { + "operationId": "Search", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/indexSearchResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/indexSearchRequest" + } + } + ], + "tags": [ + "Index" + ] + } + } + }, + "definitions": { + "NodeHealthCheckRequestProbe": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + }, + "indexBulkDeleteRequest": { + "type": "object", + "properties": { + "ids": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "indexBulkDeleteResponse": { + "type": "object", + "properties": { + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "indexBulkIndexRequest": { + "type": "object", + "properties": { + "documents": { + "type": "array", + "items": { + "$ref": "#/definitions/indexDocument" + } + } + } + }, + "indexBulkIndexResponse": { + "type": "object", + "properties": { + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "indexCluster": { + "type": "object", + "properties": { + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/indexNode" + } + } + } + }, + "indexClusterInfoResponse": { + "type": "object", + "properties": { + "cluster": { + "$ref": "#/definitions/indexCluster" + } + } + }, + "indexClusterWatchResponse": { + "type": "object", + "properties": { + "event": { + "$ref": "#/definitions/indexClusterWatchResponseEvent" + }, + "node": { + "$ref": "#/definitions/indexNode" + }, + "cluster": { + "$ref": "#/definitions/indexCluster" + } + } + }, + "indexClusterWatchResponseEvent": { + "type": "string", + 
"enum": [ + "UNKNOWN", + "JOIN", + "LEAVE", + "UPDATE" + ], + "default": "UNKNOWN" + }, + "indexDocument": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fields": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "indexGetIndexConfigResponse": { + "type": "object", + "properties": { + "index_config": { + "$ref": "#/definitions/indexIndexConfig" + } + } + }, + "indexGetIndexStatsResponse": { + "type": "object", + "properties": { + "index_stats": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "indexGetResponse": { + "type": "object", + "properties": { + "fields": { + "$ref": "#/definitions/protobufAny", + "title": "Document document = 1;" + } + } + }, + "indexIndexConfig": { + "type": "object", + "properties": { + "index_mapping": { + "$ref": "#/definitions/protobufAny" + }, + "index_type": { + "type": "string" + }, + "index_storage_type": { + "type": "string" + } + } + }, + "indexIndexRequest": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "fields": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "indexMetadata": { + "type": "object", + "properties": { + "grpc_address": { + "type": "string" + }, + "grpc_gateway_address": { + "type": "string" + }, + "http_address": { + "type": "string" + } + } + }, + "indexNode": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "bind_address": { + "type": "string" + }, + "state": { + "$ref": "#/definitions/indexNodeState" + }, + "metadata": { + "$ref": "#/definitions/indexMetadata" + } + } + }, + "indexNodeHealthCheckResponse": { + "type": "object", + "properties": { + "state": { + "$ref": "#/definitions/indexNodeHealthCheckResponseState" + } + } + }, + "indexNodeHealthCheckResponseState": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHY", + "UNHEALTHY", + "ALIVE", + "DEAD", + "READY", + "NOT_READY" + ], + "default": "UNKNOWN" + }, + "indexNodeInfoResponse": { + "type": "object", + "properties": { + "node": { + "$ref": 
"#/definitions/indexNode" + } + } + }, + "indexNodeState": { + "type": "string", + "enum": [ + "UNKNOWN", + "FOLLOWER", + "CANDIDATE", + "LEADER", + "SHUTDOWN" + ], + "default": "UNKNOWN" + }, + "indexSearchRequest": { + "type": "object", + "properties": { + "search_request": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "indexSearchResponse": { + "type": "object", + "properties": { + "search_result": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
+ }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "http_status": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + } + }, + "x-stream-definitions": { + "indexClusterWatchResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/indexClusterWatchResponse" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of indexClusterWatchResponse" + } + } +} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index 430c2e5..40577fb 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -9,6 +9,7 @@ import ( proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -758,6 +759,15 @@ func (m *GetRequest) GetKey() string { } type GetResponse struct { + // option 
(grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { + // json_schema: { + // required: ["value"] + // }, + // example: { + // value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." } }' + // } + // }; + // google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1064,82 +1074,83 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 1193 bytes of a gzipped FileDescriptorProto + // 1213 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x72, 0xda, 0x56, 0x14, 0x8e, 0x10, 0x18, 0x7c, 0x20, 0xb1, 0x72, 0xcd, 0xf8, 0x87, 0x7a, 0x52, 0x5b, 0x4d, 0x33, - 0xae, 0xd3, 0x80, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x2c, 0xd5, 0xc6, 0x26, 0xe0, 0x0a, 0x6c, - 0x8f, 0xbb, 0xf1, 0x5c, 0xe0, 0x06, 0x18, 0x40, 0xa2, 0xe8, 0xe2, 0x96, 0xe9, 0x74, 0x93, 0x6d, - 0x97, 0xdd, 0xf6, 0x3d, 0xba, 0xc8, 0x63, 0xf4, 0x05, 0xba, 0xe8, 0x74, 0xd3, 0x97, 0xe8, 0xdc, - 0x1f, 0xc9, 0x12, 0x16, 0xb6, 0xdb, 0x9d, 0x74, 0xce, 0x77, 0xbe, 0xf3, 0x9d, 0x73, 0x8f, 0xce, - 0x05, 0x78, 0x3c, 0x1c, 0x39, 0xd4, 0x69, 0x8c, 0x5f, 0x17, 0x06, 0xd8, 0xc6, 0x6d, 0x32, 0x20, - 0x36, 0x0d, 0x3c, 0xe6, 0xb9, 0x1b, 0xc1, 0x95, 0x25, 0xb7, 0xda, 0x76, 0x9c, 0x76, 0x9f, 0x14, - 0xfc, 0x40, 0x6c, 0x4f, 0x04, 0x2c, 0xf7, 0xce, 0xb4, 0x8b, 0x0c, 0x86, 0xd4, 0x73, 0xae, 0x49, - 0x27, 0x1e, 0x76, 0x0b, 0xd8, 0xb6, 0x1d, 0x8a, 0x69, 0xd7, 0xb1, 0x5d, 0xe1, 0xd5, 0x7f, 0x53, - 0x60, 0xa9, 0xe2, 0xb4, 0xc8, 0x01, 0xc1, 0x7d, 0xda, 0xd9, 0xeb, 0x90, 0x66, 0xcf, 0x22, 0xdf, - 0x8f, 0x89, 0x4b, 0xd1, 0x97, 0x90, 0x18, 0x8e, 0x9c, 0x06, 0x59, 0x51, 0xd6, 0x95, 0xcd, 0x07, - 0x3b, 0x9b, 0xf9, 0x80, 0xbc, 0xe8, 0x90, 0xfc, 0x31, 0xc3, 0x5b, 0x22, 0x4c, 0x7f, 0x09, 0x09, - 0xfe, 0x8e, 0xd2, 0x90, 0x3c, 
0xa9, 0x1c, 0x55, 0xaa, 0x67, 0x15, 0xed, 0x1e, 0x5a, 0x80, 0xf4, - 0x81, 0x59, 0x2c, 0xd7, 0x0f, 0x4a, 0x15, 0xb3, 0x56, 0xd3, 0x14, 0x94, 0x81, 0x54, 0xb9, 0x74, - 0x6a, 0xf2, 0xb7, 0x18, 0xba, 0x0f, 0xf3, 0x96, 0x59, 0x34, 0x84, 0x53, 0xd5, 0xdf, 0x2a, 0xb0, - 0x7c, 0x2d, 0x97, 0x3b, 0x74, 0x6c, 0x97, 0xa0, 0xaf, 0x20, 0xe1, 0x52, 0x4c, 0x3d, 0x7d, 0x1f, - 0xdc, 0xa8, 0x4f, 0xc4, 0xe4, 0x6b, 0x2c, 0xc0, 0x12, 0x71, 0xfa, 0x05, 0x24, 0xf8, 0x7b, 0x58, - 0x60, 0x1a, 0x92, 0x42, 0xe0, 0xb9, 0xa6, 0x30, 0x39, 0x27, 0x15, 0xef, 0x35, 0x86, 0xe6, 0x21, - 0x51, 0x64, 0x62, 0x35, 0x15, 0xa5, 0x20, 0x6e, 0x98, 0x45, 0x43, 0x8b, 0x33, 0x23, 0x93, 0x7c, - 0xae, 0x25, 0x18, 0xbc, 0x52, 0xad, 0x5f, 0x88, 0xd7, 0x39, 0xfd, 0x8d, 0x02, 0xa9, 0x57, 0x84, - 0xe2, 0x16, 0xa6, 0x18, 0x6d, 0x40, 0xa6, 0x3d, 0x1a, 0x36, 0x2f, 0x70, 0xab, 0x35, 0x22, 0xae, - 0xcb, 0x55, 0xcf, 0x5b, 0x69, 0x66, 0x2b, 0x0a, 0x13, 0xda, 0x86, 0x2c, 0x87, 0xb4, 0x31, 0x25, - 0x3f, 0xe0, 0x89, 0x0f, 0x8d, 0x71, 0x28, 0x62, 0xbe, 0x7d, 0xe1, 0xf2, 0x22, 0x36, 0x20, 0xd3, - 0xa1, 0x74, 0xe8, 0x23, 0x55, 0x41, 0xca, 0x6c, 0x12, 0xa2, 0xff, 0xad, 0x40, 0x9c, 0xb5, 0x03, - 0x3d, 0x80, 0x58, 0xb7, 0x25, 0xd3, 0xc6, 0xba, 0x2d, 0x16, 0xdb, 0xe8, 0xda, 0xad, 0xa9, 0x2c, - 0x69, 0x66, 0xf3, 0xe8, 0x3f, 0xf4, 0x5a, 0xac, 0xf2, 0x16, 0x2f, 0x4d, 0xb7, 0x38, 0xd4, 0x4f, - 0xb4, 0x0d, 0xa9, 0x81, 0xac, 0x76, 0x25, 0xbe, 0xae, 0x6c, 0xa6, 0x77, 0xb2, 0xc1, 0x00, 0xaf, - 0x13, 0x96, 0x8f, 0xd2, 0x8f, 0x22, 0x4f, 0x20, 0x03, 0xa9, 0x6f, 0xaa, 0xe5, 0x72, 0xf5, 0xcc, - 0xb4, 0xc4, 0x11, 0xec, 0x15, 0x2b, 0x46, 0xc9, 0x28, 0xd6, 0x4d, 0x2d, 0x86, 0x00, 0xe6, 0xca, - 0x66, 0xd1, 0x30, 0x2d, 0x4d, 0x65, 0xc0, 0xda, 0xc1, 0x49, 0xdd, 0x60, 0x61, 0x71, 0xfd, 0x17, - 0x05, 0x92, 0x7b, 0xfd, 0xb1, 0x4b, 0xc9, 0x08, 0x7d, 0x0c, 0x09, 0xdb, 0x69, 0x11, 0xd6, 0x65, - 0x75, 0x33, 0xbd, 0xf3, 0x28, 0xa8, 0x43, 0x62, 0x78, 0x01, 0xae, 0x69, 0xd3, 0xd1, 0xc4, 0x12, - 0xe0, 0xdc, 0x21, 0xc0, 0x95, 0x11, 0x69, 0xa0, 0xf6, 0xc8, 0x44, 
0x36, 0x8c, 0x3d, 0xa2, 0x27, - 0x90, 0xb8, 0xc4, 0xfd, 0x31, 0xe1, 0xad, 0x4a, 0xef, 0x68, 0xd3, 0xed, 0xb0, 0x84, 0x7b, 0x37, - 0xf6, 0x42, 0xd1, 0x5f, 0x80, 0xc6, 0x4c, 0x25, 0xfb, 0xb5, 0xe3, 0x4f, 0xec, 0x63, 0x88, 0xb3, - 0x44, 0x9c, 0x32, 0x2a, 0x9c, 0x7b, 0xf5, 0x5d, 0x40, 0x52, 0xe2, 0xa1, 0xd3, 0xb5, 0xbd, 0xaf, - 0xf1, 0x6e, 0xb1, 0xef, 0xc3, 0xa2, 0x8c, 0x2d, 0x13, 0x7c, 0x49, 0xbc, 0xe0, 0xa9, 0xa3, 0xd7, - 0x0d, 0x1f, 0x16, 0xd2, 0xf7, 0x0c, 0x92, 0x4d, 0x61, 0x96, 0x69, 0x16, 0x23, 0xfa, 0x66, 0x79, - 0x18, 0xfd, 0x4f, 0x05, 0xb2, 0xd2, 0x78, 0x86, 0x69, 0xb3, 0xe3, 0xf3, 0x7c, 0x0e, 0x09, 0x72, - 0x49, 0x6c, 0x2a, 0xbf, 0xcc, 0x27, 0x11, 0x2c, 0xa1, 0x80, 0xbc, 0xc9, 0xd0, 0x96, 0x08, 0xf2, - 0x2b, 0x8d, 0xdd, 0x54, 0x69, 0x50, 0xab, 0x7a, 0x07, 0xad, 0x9f, 0x40, 0x82, 0x27, 0x09, 0x4f, - 0x5a, 0x0a, 0xe2, 0x87, 0xd5, 0x52, 0x45, 0x53, 0xd8, 0x47, 0x5c, 0x36, 0x8b, 0xa7, 0x72, 0xc2, - 0x4e, 0x8e, 0xf9, 0xb4, 0xa9, 0xfa, 0x01, 0xa4, 0x8e, 0xc8, 0xe4, 0x94, 0x9d, 0x6a, 0xc4, 0x3c, - 0x6c, 0x85, 0xe7, 0x21, 0x9b, 0x17, 0xab, 0x36, 0xef, 0xed, 0xe1, 0x7c, 0xd1, 0x9e, 0xc8, 0x99, - 0xd0, 0x1f, 0x01, 0xec, 0x13, 0xea, 0x1d, 0xc8, 0x35, 0x2e, 0xfd, 0x53, 0x48, 0x73, 0xbf, 0x6c, - 0xa1, 0x4f, 0xad, 0xdc, 0x4e, 0x7d, 0x08, 0x50, 0xbb, 0x81, 0xfa, 0x3f, 0xc9, 0xdc, 0x80, 0xfb, - 0x06, 0xe9, 0x13, 0x4a, 0x66, 0x2b, 0x5d, 0x87, 0x8c, 0x3c, 0xbd, 0x59, 0x88, 0xdf, 0x15, 0xb8, - 0x1f, 0x9e, 0x88, 0xcf, 0x20, 0xd9, 0x74, 0x06, 0x03, 0x6c, 0xb7, 0xe4, 0x4c, 0x6c, 0x04, 0x4f, - 0x2b, 0x3c, 0x0c, 0x7b, 0x02, 0x68, 0x79, 0x11, 0x5e, 0x82, 0x58, 0x44, 0x45, 0xea, 0xed, 0x15, - 0x3d, 0x85, 0xa4, 0x64, 0x0c, 0x9f, 0x7d, 0x12, 0xd4, 0x9a, 0x59, 0xd7, 0x14, 0x76, 0xde, 0x86, - 0x59, 0x36, 0xd9, 0x76, 0xd1, 0xff, 0x51, 0x20, 0x75, 0x3c, 0x72, 0x86, 0x8e, 0x8b, 0xfb, 0x68, - 0x3b, 0x3c, 0xc6, 0xb9, 0xa0, 0x64, 0x0f, 0xf4, 0x7f, 0x46, 0xf7, 0x39, 0xcc, 0xf7, 0xc8, 0xe4, - 0x22, 0x5c, 0x41, 0x00, 0xea, 0x4d, 0x9c, 0x95, 0xea, 0xc9, 0x27, 0xbd, 0x1e, 0x39, 0xbe, 0x6c, - 0xff, 
0x99, 0xf5, 0x8b, 0x4a, 0xd5, 0x30, 0x35, 0x85, 0xdd, 0xac, 0xa2, 0x0e, 0x61, 0xe0, 0x77, - 0x29, 0x73, 0x9f, 0x16, 0xcb, 0x27, 0xec, 0xc6, 0xd2, 0x20, 0x23, 0xfd, 0xc2, 0x12, 0xdf, 0x79, - 0x9b, 0x04, 0x78, 0xe5, 0xe7, 0x45, 0x3f, 0xc2, 0xc2, 0xd4, 0xbd, 0x89, 0xf4, 0xdb, 0x2f, 0xfd, - 0xdc, 0x7b, 0x77, 0xb8, 0x78, 0xf5, 0xb5, 0x37, 0x7f, 0xfc, 0xf5, 0x6b, 0x6c, 0x09, 0x65, 0x0b, - 0x97, 0xcf, 0x0b, 0xac, 0x0b, 0x85, 0x0e, 0x47, 0x35, 0x79, 0x9a, 0x73, 0x48, 0x79, 0xcb, 0x12, - 0x2d, 0x5d, 0x3b, 0x4c, 0x93, 0xfd, 0x9a, 0xc9, 0xad, 0x4d, 0xa7, 0x09, 0xae, 0x2e, 0x7d, 0x99, - 0xf3, 0x3f, 0x44, 0x0b, 0x3e, 0x3f, 0xbb, 0x93, 0xc6, 0x2e, 0xda, 0x87, 0x74, 0x60, 0x9b, 0xa2, - 0xa8, 0x9b, 0x20, 0xb0, 0x66, 0x73, 0x33, 0xb2, 0xeb, 0xf7, 0x50, 0x09, 0x32, 0xc1, 0xd5, 0x8a, - 0xde, 0x8d, 0x60, 0x0a, 0x2e, 0xdd, 0x1b, 0xa8, 0x1a, 0xbe, 0xa6, 0x1b, 0x2b, 0x8e, 0xca, 0x10, - 0x2a, 0x3a, 0xc7, 0x8b, 0xce, 0x22, 0xc4, 0x8a, 0x96, 0x9b, 0xce, 0xab, 0xbb, 0xec, 0xcb, 0xe5, - 0x5f, 0xd7, 0xcc, 0x24, 0xeb, 0xb7, 0x2d, 0x67, 0xfd, 0xde, 0xb6, 0x82, 0xbe, 0x05, 0x75, 0x9f, - 0x50, 0x14, 0xfa, 0x01, 0x70, 0xb5, 0xce, 0x72, 0xcb, 0xd7, 0xec, 0x32, 0x76, 0x95, 0x2b, 0x5c, - 0x44, 0x0f, 0x99, 0x42, 0x76, 0xe5, 0x17, 0x7e, 0xea, 0x91, 0xc9, 0x17, 0x5b, 0x5b, 0x3f, 0xa3, - 0x1a, 0xa8, 0xb5, 0x69, 0xca, 0xab, 0x35, 0x36, 0xb3, 0x7b, 0x72, 0x90, 0x72, 0xd7, 0x19, 0x77, - 0x95, 0x2d, 0x74, 0x0a, 0x73, 0x62, 0x7d, 0xa1, 0xd5, 0x20, 0x6f, 0x68, 0xa5, 0xcd, 0xa4, 0x96, - 0x62, 0xb7, 0x22, 0xc4, 0x7e, 0x0d, 0x09, 0xd1, 0xc6, 0x95, 0x88, 0xbd, 0x25, 0x58, 0x57, 0x67, - 0x6e, 0x34, 0xde, 0xc1, 0x63, 0x48, 0xd5, 0x6c, 0x3c, 0x74, 0x3b, 0x0e, 0x9d, 0x79, 0x16, 0xb3, - 0x84, 0x65, 0xb9, 0xb0, 0x07, 0x28, 0xc3, 0x84, 0xb9, 0x92, 0xe5, 0xe5, 0xb3, 0xef, 0x9e, 0xb6, - 0xbb, 0xb4, 0x33, 0x6e, 0xe4, 0x9b, 0xce, 0xa0, 0x30, 0x70, 0xdc, 0x71, 0x0f, 0x17, 0x1a, 0x7d, - 0xec, 0xd2, 0x42, 0xc4, 0x9f, 0x8b, 0xc6, 0x1c, 0x37, 0x7e, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x57, 0x7b, 0x42, 0x0c, 0x7a, 0x0c, 
0x00, 0x00, + 0xae, 0x13, 0x83, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x8c, 0x6a, 0x63, 0x13, 0x70, 0x05, 0xb6, + 0xc7, 0xdd, 0x78, 0x2e, 0x70, 0x03, 0x0c, 0x20, 0x51, 0x74, 0x71, 0xca, 0x74, 0xba, 0xc9, 0xb6, + 0xcb, 0x6e, 0xfb, 0x1e, 0x5d, 0xe4, 0x31, 0xfa, 0x02, 0x5d, 0x74, 0xba, 0xe9, 0x4b, 0x74, 0xee, + 0x8f, 0x64, 0x09, 0x84, 0xed, 0x76, 0x65, 0xe9, 0x9c, 0xef, 0x7c, 0xe7, 0x3b, 0xe7, 0x1e, 0x9d, + 0x8b, 0xe1, 0xf1, 0x60, 0x68, 0x53, 0xbb, 0x3e, 0x7a, 0x9d, 0xeb, 0x63, 0x0b, 0xb7, 0x48, 0x9f, + 0x58, 0xd4, 0xf7, 0x98, 0xe5, 0x6e, 0x04, 0xd7, 0x96, 0xcc, 0x6a, 0xcb, 0xb6, 0x5b, 0x3d, 0x92, + 0xf3, 0x02, 0xb1, 0x35, 0x16, 0xb0, 0xcc, 0x7b, 0x93, 0x2e, 0xd2, 0x1f, 0x50, 0xd7, 0xb9, 0x26, + 0x9d, 0x78, 0xd0, 0xc9, 0x61, 0xcb, 0xb2, 0x29, 0xa6, 0x1d, 0xdb, 0x72, 0xa4, 0xf7, 0x19, 0xff, + 0xd3, 0xd8, 0x6e, 0x11, 0x6b, 0xdb, 0x79, 0x83, 0x5b, 0x2d, 0x32, 0xcc, 0xd9, 0x03, 0x8e, 0x98, + 0x46, 0xeb, 0xbf, 0x29, 0xb0, 0x54, 0xb6, 0x9b, 0xe4, 0x90, 0xe0, 0x1e, 0x6d, 0xef, 0xb7, 0x49, + 0xa3, 0x6b, 0x92, 0x1f, 0x46, 0xc4, 0xa1, 0xe8, 0x2b, 0x88, 0x0d, 0x86, 0x76, 0x9d, 0xac, 0x28, + 0xeb, 0xca, 0xe6, 0x83, 0xdd, 0xcd, 0xac, 0xaf, 0x98, 0xf0, 0x90, 0xec, 0x09, 0xc3, 0x9b, 0x22, + 0x4c, 0x7f, 0x09, 0x31, 0xfe, 0x8e, 0x92, 0x10, 0x3f, 0x2d, 0x1f, 0x97, 0x2b, 0xe7, 0x65, 0xed, + 0x1e, 0x5a, 0x80, 0xe4, 0xa1, 0x91, 0x2f, 0xd5, 0x0e, 0x8b, 0x65, 0xa3, 0x5a, 0xd5, 0x14, 0x94, + 0x82, 0x44, 0xa9, 0x78, 0x66, 0xf0, 0xb7, 0x08, 0xba, 0x0f, 0xf3, 0xa6, 0x91, 0x2f, 0x08, 0xa7, + 0xaa, 0xbf, 0x53, 0x60, 0x79, 0x2a, 0x97, 0x33, 0xb0, 0x2d, 0x87, 0xa0, 0xaf, 0x21, 0xe6, 0x50, + 0x4c, 0x5d, 0x7d, 0x1f, 0xdd, 0xa8, 0x4f, 0xc4, 0x64, 0xab, 0x2c, 0xc0, 0x14, 0x71, 0xfa, 0x25, + 0xc4, 0xf8, 0x7b, 0x50, 0x60, 0x12, 0xe2, 0x42, 0xe0, 0x85, 0xa6, 0x30, 0x39, 0xa7, 0x65, 0xf7, + 0x35, 0x82, 0xe6, 0x21, 0x96, 0x67, 0x62, 0x35, 0x15, 0x25, 0x20, 0x5a, 0x30, 0xf2, 0x05, 0x2d, + 0xca, 0x8c, 0x4c, 0xf2, 0x85, 0x16, 0x63, 0xf0, 0x72, 0xa5, 0x76, 0x29, 0x5e, 0xe7, 0xf4, 0xb7, + 0x0a, 0x24, 0x5e, 0x11, 
0x8a, 0x9b, 0x98, 0x62, 0xb4, 0x01, 0xa9, 0xd6, 0x70, 0xd0, 0xb8, 0xc4, + 0xcd, 0xe6, 0x90, 0x38, 0x0e, 0x57, 0x3d, 0x6f, 0x26, 0x99, 0x2d, 0x2f, 0x4c, 0x68, 0x07, 0xd2, + 0x1c, 0xd2, 0xc2, 0x94, 0xbc, 0xc1, 0x63, 0x0f, 0x1a, 0xe1, 0x50, 0xc4, 0x7c, 0x07, 0xc2, 0xe5, + 0x46, 0x6c, 0x40, 0xaa, 0x4d, 0xe9, 0xc0, 0x43, 0xaa, 0x82, 0x94, 0xd9, 0x24, 0x44, 0xff, 0x5b, + 0x81, 0x28, 0x6b, 0x07, 0x7a, 0x00, 0x91, 0x4e, 0x53, 0xa6, 0x8d, 0x74, 0x9a, 0x2c, 0xb6, 0xde, + 0xb1, 0x9a, 0x13, 0x59, 0x92, 0xcc, 0xe6, 0xd2, 0x3f, 0x73, 0x5b, 0xac, 0xf2, 0x16, 0x2f, 0x4d, + 0xb6, 0x38, 0xd0, 0x4f, 0xb4, 0x03, 0x89, 0xbe, 0xac, 0x76, 0x25, 0xba, 0xae, 0x6c, 0x26, 0x77, + 0xd3, 0xfe, 0x00, 0xb7, 0x13, 0xa6, 0x87, 0xd2, 0x8f, 0x43, 0x4f, 0x20, 0x05, 0x89, 0x6f, 0x2b, + 0xa5, 0x52, 0xe5, 0xdc, 0x30, 0xc5, 0x11, 0xec, 0xe7, 0xcb, 0x85, 0x62, 0x21, 0x5f, 0x33, 0xb4, + 0x08, 0x02, 0x98, 0x2b, 0x19, 0xf9, 0x82, 0x61, 0x6a, 0x2a, 0x03, 0x56, 0x0f, 0x4f, 0x6b, 0x05, + 0x16, 0x16, 0xd5, 0x7f, 0x51, 0x20, 0xbe, 0xdf, 0x1b, 0x39, 0x94, 0x0c, 0xd1, 0x27, 0x10, 0xb3, + 0xec, 0x26, 0x61, 0x5d, 0x56, 0x37, 0x93, 0xbb, 0x8f, 0xfc, 0x3a, 0x24, 0x86, 0x17, 0xe0, 0x18, + 0x16, 0x1d, 0x8e, 0x4d, 0x01, 0xce, 0x1c, 0x01, 0x5c, 0x1b, 0x91, 0x06, 0x6a, 0x97, 0x8c, 0x65, + 0xc3, 0xd8, 0x23, 0x7a, 0x02, 0xb1, 0x2b, 0xdc, 0x1b, 0x11, 0xde, 0xaa, 0xe4, 0xae, 0x36, 0xd9, + 0x0e, 0x53, 0xb8, 0xf7, 0x22, 0x2f, 0x14, 0xfd, 0x05, 0x68, 0xcc, 0x54, 0xb4, 0x5e, 0xdb, 0xde, + 0xc4, 0x3e, 0x86, 0x28, 0x4b, 0xc4, 0x29, 0xc3, 0xc2, 0xb9, 0x57, 0xdf, 0x03, 0x24, 0x25, 0x1e, + 0xd9, 0x1d, 0xcb, 0xfd, 0x1a, 0xef, 0x16, 0xfb, 0x21, 0x2c, 0xca, 0xd8, 0x12, 0xc1, 0x57, 0xc4, + 0x0d, 0x9e, 0x38, 0x7a, 0xbd, 0xe0, 0xc1, 0x02, 0xfa, 0xb6, 0x21, 0xde, 0x10, 0x66, 0x99, 0x66, + 0x31, 0xa4, 0x6f, 0xa6, 0x8b, 0xd1, 0xff, 0x54, 0x20, 0x2d, 0x8d, 0xe7, 0x98, 0x36, 0xda, 0x1e, + 0xcf, 0x17, 0x10, 0x23, 0x57, 0xc4, 0xa2, 0xf2, 0xcb, 0x7c, 0x12, 0xc2, 0x12, 0x08, 0xc8, 0x1a, + 0x0c, 0x6d, 0x8a, 0x20, 0xaf, 0xd2, 0xc8, 0x4d, 0x95, 0xfa, 
0xb5, 0xaa, 0x77, 0xd0, 0xfa, 0x29, + 0xc4, 0x78, 0x92, 0xe0, 0xa4, 0x25, 0x20, 0x7a, 0x54, 0x29, 0x96, 0x35, 0x85, 0x7d, 0xc4, 0x25, + 0x23, 0x7f, 0x26, 0x27, 0xec, 0xf4, 0x84, 0x4f, 0x9b, 0xaa, 0x1f, 0x42, 0xe2, 0x98, 0x8c, 0xcf, + 0xd8, 0xa9, 0x86, 0xcc, 0xc3, 0x56, 0x70, 0x1e, 0xd2, 0x59, 0xb1, 0x98, 0xb3, 0xee, 0xd6, 0xce, + 0xe6, 0xad, 0xb1, 0x9c, 0x09, 0xfd, 0x11, 0xc0, 0x01, 0xa1, 0xee, 0x81, 0x4c, 0x71, 0xe9, 0x9f, + 0x41, 0x92, 0xfb, 0x65, 0x0b, 0x3d, 0x6a, 0xe5, 0x76, 0xea, 0x23, 0x80, 0xea, 0x0d, 0xd4, 0xff, + 0x49, 0xe6, 0x06, 0xdc, 0x2f, 0x90, 0x1e, 0xa1, 0x64, 0xb6, 0xd2, 0x75, 0x48, 0xc9, 0xd3, 0x9b, + 0x85, 0xf8, 0x5d, 0x81, 0xfb, 0xc1, 0x89, 0xf8, 0x1c, 0xe2, 0x0d, 0xbb, 0xdf, 0xc7, 0x56, 0x53, + 0xce, 0xc4, 0x86, 0xff, 0xb4, 0x82, 0xc3, 0xb0, 0x2f, 0x80, 0xa6, 0x1b, 0xe1, 0x26, 0x88, 0x84, + 0x54, 0xa4, 0xde, 0x5e, 0xd1, 0x53, 0x88, 0x4b, 0xc6, 0xe0, 0xd9, 0xc7, 0x41, 0xad, 0x1a, 0x35, + 0x4d, 0x61, 0xe7, 0x5d, 0x30, 0x4a, 0x06, 0xdb, 0x2e, 0xfa, 0x3f, 0x0a, 0x24, 0x4e, 0x86, 0xf6, + 0xc0, 0x76, 0x70, 0x0f, 0xed, 0x04, 0xc7, 0x38, 0xe3, 0x97, 0xec, 0x82, 0xfe, 0xcf, 0xe8, 0x3e, + 0x87, 0xf9, 0x2e, 0x19, 0x5f, 0x06, 0x2b, 0xf0, 0x41, 0xdd, 0x89, 0x33, 0x13, 0x5d, 0xf9, 0xa4, + 0xd7, 0x42, 0xc7, 0x97, 0xed, 0x3f, 0xa3, 0x76, 0x59, 0xae, 0x14, 0x0c, 0x4d, 0x61, 0x37, 0xab, + 0xa8, 0x43, 0x18, 0xf8, 0x5d, 0xca, 0xdc, 0x67, 0xf9, 0xd2, 0x29, 0xbb, 0xb1, 0x34, 0x48, 0x49, + 0xbf, 0xb0, 0x44, 0x77, 0xdf, 0xc5, 0x01, 0x5e, 0x79, 0x79, 0xd1, 0x8f, 0xb0, 0x30, 0x71, 0x6f, + 0x22, 0xfd, 0xf6, 0x4b, 0x3f, 0xf3, 0xc1, 0x1d, 0x2e, 0x5e, 0x7d, 0xed, 0xed, 0x1f, 0x7f, 0xfd, + 0x1a, 0x59, 0x42, 0xe9, 0xdc, 0xd5, 0xf3, 0x1c, 0xeb, 0x42, 0xae, 0xcd, 0x51, 0x0d, 0x9e, 0xe6, + 0x02, 0x12, 0xee, 0xb2, 0x44, 0x4b, 0x53, 0x87, 0x69, 0xb0, 0xdf, 0x3e, 0x99, 0xb5, 0xc9, 0x34, + 0xfe, 0xd5, 0xa5, 0x2f, 0x73, 0xfe, 0x87, 0x68, 0xc1, 0xe3, 0x67, 0x77, 0xd2, 0xc8, 0x41, 0x07, + 0x90, 0xf4, 0x6d, 0x53, 0x14, 0x76, 0x13, 0xf8, 0xd6, 0x6c, 0x66, 0x46, 0x76, 0xfd, 0x1e, 0x2a, + 
0x42, 0xca, 0xbf, 0x5a, 0xd1, 0xfb, 0x21, 0x4c, 0xfe, 0xa5, 0x7b, 0x03, 0x55, 0xdd, 0xd3, 0x74, + 0x63, 0xc5, 0x61, 0x19, 0x02, 0x45, 0x67, 0x78, 0xd1, 0x69, 0x84, 0x58, 0xd1, 0x72, 0xd3, 0xb9, + 0x75, 0x97, 0x3c, 0xb9, 0xfc, 0xeb, 0x9a, 0x99, 0x64, 0xfd, 0xb6, 0xe5, 0xac, 0xdf, 0xdb, 0x51, + 0xd0, 0x77, 0xa0, 0x1e, 0x10, 0x8a, 0x02, 0x3f, 0x00, 0xae, 0xd7, 0x59, 0x66, 0x79, 0xca, 0x2e, + 0x63, 0x57, 0xb9, 0xc2, 0x45, 0xf4, 0x90, 0x29, 0x64, 0x57, 0x7e, 0xee, 0xa7, 0x2e, 0x19, 0x7f, + 0xb9, 0xb5, 0xf5, 0x33, 0xaa, 0x82, 0x5a, 0x9d, 0xa4, 0xbc, 0x5e, 0x63, 0x33, 0xbb, 0x27, 0x07, + 0x29, 0x33, 0xcd, 0xb8, 0xa7, 0x6c, 0xa1, 0x33, 0x98, 0x13, 0xeb, 0x0b, 0xad, 0xfa, 0x79, 0x03, + 0x2b, 0x6d, 0x26, 0xb5, 0x14, 0xbb, 0x15, 0x22, 0xf6, 0x1b, 0x88, 0x89, 0x36, 0xae, 0x84, 0xec, + 0x2d, 0xc1, 0xba, 0x3a, 0x73, 0xa3, 0xf1, 0x0e, 0x9e, 0x40, 0xa2, 0x6a, 0xe1, 0x81, 0xd3, 0xb6, + 0xe9, 0xcc, 0xb3, 0x98, 0x25, 0x2c, 0xcd, 0x85, 0x3d, 0x40, 0x29, 0x26, 0xcc, 0x91, 0x2c, 0x2f, + 0xb7, 0xbf, 0x7f, 0xda, 0xea, 0xd0, 0xf6, 0xa8, 0x9e, 0x6d, 0xd8, 0xfd, 0x5c, 0xdf, 0x76, 0x46, + 0x5d, 0x9c, 0xab, 0xf7, 0xb0, 0x43, 0x73, 0x21, 0xff, 0x8a, 0xd4, 0xe7, 0xb8, 0xf1, 0xe3, 0x7f, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0xf6, 0xde, 0xbe, 0xa8, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 621603b..e3c8429 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -17,6 +17,7 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/api/annotations.proto"; +import "protoc-gen-swagger/options/annotations.proto"; package management; @@ -152,6 +153,15 @@ message GetRequest { } message GetResponse { +// option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { +// json_schema: { +// required: ["value"] +// }, +// example: { +// value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." } }' +// } +// }; +// google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; google.protobuf.Any value = 1; } diff --git a/protobuf/management/management.swagger.json b/protobuf/management/management.swagger.json new file mode 100644 index 0000000..18f1ed1 --- /dev/null +++ b/protobuf/management/management.swagger.json @@ -0,0 +1,409 @@ +{ + "swagger": "2.0", + "info": { + "title": "protobuf/management/management.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/v1/cluster/status": { + "get": { + "operationId": "ClusterInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/managementClusterInfoResponse" + } + } + }, + "tags": [ + "Management" + ] + } + }, + "/v1/data/{key}": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/managementGetResponse" + } + } + }, + "parameters": [ + { + "name": "key", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Management" + ] + }, + "delete": { + 
"operationId": "Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "key", + "in": "path", + "required": true, + "type": "string" + } + ], + "tags": [ + "Management" + ] + }, + "put": { + "operationId": "Set", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "parameters": [ + { + "name": "key", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/managementSetRequest" + } + } + ], + "tags": [ + "Management" + ] + } + }, + "/v1/node/healthcheck": { + "get": { + "operationId": "NodeHealthCheck", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/managementNodeHealthCheckResponse" + } + } + }, + "parameters": [ + { + "name": "probe", + "in": "query", + "required": false, + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + } + ], + "tags": [ + "Management" + ] + } + }, + "/v1/node/status": { + "get": { + "operationId": "NodeInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/managementNodeInfoResponse" + } + } + }, + "tags": [ + "Management" + ] + } + }, + "/v1/snapshot": { + "get": { + "operationId": "Snapshot", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "properties": {} + } + } + }, + "tags": [ + "Management" + ] + } + } + }, + "definitions": { + "NodeHealthCheckRequestProbe": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHINESS", + "LIVENESS", + "READINESS" + ], + "default": "UNKNOWN" + }, + "WatchResponseCommand": { + "type": "string", + "enum": [ + "UNKNOWN", + "SET", + "DELETE" + ], + "default": "UNKNOWN" + }, + "managementCluster": { + "type": 
"object", + "properties": { + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/managementNode" + } + } + } + }, + "managementClusterInfoResponse": { + "type": "object", + "properties": { + "cluster": { + "$ref": "#/definitions/managementCluster" + } + } + }, + "managementClusterWatchResponse": { + "type": "object", + "properties": { + "event": { + "$ref": "#/definitions/managementClusterWatchResponseEvent" + }, + "node": { + "$ref": "#/definitions/managementNode" + }, + "cluster": { + "$ref": "#/definitions/managementCluster" + } + } + }, + "managementClusterWatchResponseEvent": { + "type": "string", + "enum": [ + "UNKNOWN", + "JOIN", + "LEAVE", + "UPDATE" + ], + "default": "UNKNOWN" + }, + "managementGetResponse": { + "type": "object", + "properties": { + "value": { + "$ref": "#/definitions/protobufAny", + "title": "option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = {\n json_schema: {\n required: [\"value\"]\n },\n example: {\n value: '{ \"fields\": { \"field1\": \"Get Example\", \"field2\": \"This is an example Get response.\" } }'\n }\n };\n google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}];" + } + } + }, + "managementMetadata": { + "type": "object", + "properties": { + "grpc_address": { + "type": "string" + }, + "grpc_gateway_address": { + "type": "string" + }, + "http_address": { + "type": "string" + } + } + }, + "managementNode": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "bind_address": { + "type": "string" + }, + "state": { + "$ref": "#/definitions/managementNodeState" + }, + "metadata": { + "$ref": "#/definitions/managementMetadata" + } + } + }, + "managementNodeHealthCheckResponse": { + "type": "object", + "properties": { + "state": { + "$ref": "#/definitions/managementNodeHealthCheckResponseState" + } + } + }, + "managementNodeHealthCheckResponseState": { + "type": "string", + "enum": [ + "UNKNOWN", + "HEALTHY", + 
"UNHEALTHY", + "ALIVE", + "DEAD", + "READY", + "NOT_READY" + ], + "default": "UNKNOWN" + }, + "managementNodeInfoResponse": { + "type": "object", + "properties": { + "node": { + "$ref": "#/definitions/managementNode" + } + } + }, + "managementNodeState": { + "type": "string", + "enum": [ + "UNKNOWN", + "FOLLOWER", + "CANDIDATE", + "LEADER", + "SHUTDOWN" + ], + "default": "UNKNOWN" + }, + "managementSetRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "managementWatchResponse": { + "type": "object", + "properties": { + "command": { + "$ref": "#/definitions/WatchResponseCommand" + }, + "key": { + "type": "string" + }, + "value": { + "$ref": "#/definitions/protobufAny" + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. 
(Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + }, + "value": { + "type": "string", + "format": "byte", + "description": "Must be a valid serialized protocol buffer of the above specified type." + } + }, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "http_status": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + } + }, + "x-stream-definitions": { + "managementClusterWatchResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/managementClusterWatchResponse" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of managementClusterWatchResponse" + }, + "managementWatchResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/managementWatchResponse" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of managementWatchResponse" + } + } +} From 30da0b8653d480ee11431184016dbca437bca490 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 28 Aug 2019 20:15:42 +0900 Subject: [PATCH 37/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index ce6fb19..512fe2c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] ### Added +- Add swagger specification experimentaly #107 ### Changed From 31e11924c4d4c4b4322320dcb5bdfabc3d9bdfa9 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Wed, 28 Aug 2019 20:30:53 +0900 Subject: [PATCH 38/76] Update CHANGES.md --- CHANGES.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 512fe2c..24823ef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,6 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). - ## [Unreleased] ### Added @@ -13,6 +12,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Changed +## [v0.8.0] + +### Added +- Add swagger specification experimentaly #107 + +### Changed + - New CLI #82 - Split protobuf into components #84 - Change subcommands #85 From 4fab34bad077725bab05cccafbfec88d022fa8a7 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 17 Sep 2019 20:03:24 +0900 Subject: [PATCH 39/76] Update go version and dependencies (#109) * Update dependencies * Update docker base image --- Dockerfile | 2 +- go.mod | 59 ++++------ go.sum | 250 ++++++++++++++--------------------------- indexer/raft_server.go | 55 +++++---- manager/raft_server.go | 51 +++++---- protobuf/util_test.go | 2 +- 6 files changed, 157 insertions(+), 262 deletions(-) diff --git a/Dockerfile b/Dockerfile index bda65ea..4da3182 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM golang:1.12.7-stretch +FROM golang:1.13.0-stretch ARG VERSION diff --git a/go.mod b/go.mod index 2adb2ee..789c17d 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,10 @@ module github.com/mosuka/blast -go 1.12 +go 1.13 require ( - cloud.google.com/go v0.44.3 // indirect - github.com/blevesearch/bleve v0.7.0 - github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect + github.com/blevesearch/bleve v0.8.0 + github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 // indirect @@ -13,54 +12,36 @@ require ( github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect - github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec // indirect + github.com/etcd-io/bbolt v1.3.3 // indirect github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect - github.com/gogo/protobuf v1.1.1 + github.com/gogo/protobuf v1.3.0 github.com/golang/protobuf v1.3.2 github.com/google/go-cmp v0.3.1 - github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect - github.com/gorilla/mux v1.7.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 + github.com/gorilla/mux v1.7.3 + github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.9.6 - github.com/hashicorp/golang-lru v0.5.3 // indirect - github.com/hashicorp/raft v1.1.0 + github.com/grpc-ecosystem/grpc-gateway v1.11.1 + github.com/hashicorp/raft v1.1.1 
github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/ikawaha/kagome.ipadic v1.0.1 // indirect + github.com/ikawaha/kagome.ipadic v1.1.0 // indirect github.com/imdario/mergo v0.3.7 github.com/jmhodges/levigo v1.0.0 // indirect - github.com/kr/pty v1.1.8 // indirect - github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 - github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 + github.com/mosuka/bbadger v0.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/prometheus/client_golang v0.9.2 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect - github.com/prometheus/common v0.2.0 // indirect - github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect - github.com/rogpeppe/fastuuid v1.2.0 // indirect - github.com/rogpeppe/go-internal v1.3.1 // indirect - github.com/stretchr/objx v0.1.1 + github.com/prometheus/client_golang v1.1.0 + github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect + github.com/stretchr/objx v0.2.0 github.com/syndtr/goleveldb v1.0.0 // indirect - github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect - github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect - github.com/urfave/cli v1.20.0 - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.1.0 // indirect + github.com/tebeka/snowball v0.3.0 // indirect + github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect + github.com/urfave/cli v1.22.1 + go.etcd.io/bbolt v1.3.3 // indirect go.uber.org/zap v1.10.0 - golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect - golang.org/x/image v0.0.0-20190823064033-3a9bac650e44 // indirect - golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5 // indirect - golang.org/x/net 
v0.0.0-20190827160401-ba9fcec4b297 // indirect - golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect - golang.org/x/tools v0.0.0-20190827205025-b29f5f60c37a // indirect - google.golang.org/api v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 - google.golang.org/grpc v1.23.0 + google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 + google.golang.org/grpc v1.23.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.2.2 - honnef.co/go/tools v0.0.1-2019.2.2 // indirect ) diff --git a/go.sum b/go.sum index 1fe5def..1202765 100644 --- a/go.sum +++ b/go.sum @@ -1,33 +1,25 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1 h1:G/NOANWMQev0CftoyxQwtRakdyNNNMB3qxkt/tj1HGs= github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1/go.mod h1:o80NPAib/LOl8Eysqppjj7kkGkqz++eqzYGlvROpDcQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/blevesearch/bleve v0.7.0 h1:znyZ3zjsh2Scr60vszs7rbF29TU6i1q9bfnZf1vh0Ac= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blevesearch/bleve v0.7.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= -github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA= -github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3/go.mod 
h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= +github.com/blevesearch/bleve v0.8.0 h1:DCoCrxscCXrlzVWK92k7Vq4d28lTAFuigVmcgIX0VCo= +github.com/blevesearch/bleve v0.8.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= +github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= +github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 h1:ZPImXwzC+ICkkSYlPP9mMVgQlZH24+56rIEUjVxfFnY= github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I= @@ -47,7 +39,8 @@ github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpm github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= @@ -57,13 +50,16 @@ github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v1.5.4 h1:gVTrpUTbbr/T24uvoCaqY2KSHfNLVGm0w+hbee2HMeg= -github.com/dgraph-io/badger v1.5.4/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible h1:xeEWHqaQFcm44dJsZYN6JIiLCHG+DciygDfGvIfbkv8= +github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec h1:sElGDs3V8VdCxH5tWi0ycWJzteOPLJ3HtItSSKI95PY= -github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 
h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -72,7 +68,6 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= @@ -80,99 +75,88 @@ github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof 
v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.6 h1:8p0pcgLlw2iuZVsdHdPaMUXFOA+6gDixcXbHEMzSyW8= -github.com/grpc-ecosystem/grpc-gateway v1.9.6/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.11.1 h1:/dBYI+n4xIL+Y9SKXQrjlKTmJJDwCSlNLRwZ5nBhIek= +github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y= -github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= 
-github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 h1:bLsrEmB2NUwkHH18FOJBIa04wOV2RQalJrcafTYu6Lg= github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477/go.mod h1:aUF6HQr8+t3FC/ZHAC+pZreUBhTaxumuu3L+d37uRxk= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ikawaha/kagome.ipadic v1.0.1 h1:4c/tx3Rga6LvtTouEdvodcfeWWTttATZg8XIH8lRHG4= -github.com/ikawaha/kagome.ipadic v1.0.1/go.mod h1:Nh0/WFhzTQYw9XlsOxAuhdSZ1/xfi7vn5pjqb6FBwJE= +github.com/ikawaha/kagome.ipadic v1.1.0 h1:9hzwhcklEL4Cmp+lM9HQfmDg2nhB43Fe1n9UUY6mifY= +github.com/ikawaha/kagome.ipadic v1.1.0/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 h1:/ylv98AIMI8XzkeqJGmJSTc/zRQrNllmYWW5b2MoyD4= -github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69/go.mod h1:H6ZQv8h8j98nwnF25XLGalSOLhFRjFQ2GGNZRNkkw8Y= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217/go.mod h1:5JLTyA+23fYz/BfD5Hn736mGEZopzWtEx1pdNfnTp8k= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mosuka/bbadger 
v0.0.0-20190319122948-67a91aedfe68 h1:LE+XIZDiXr587to+tCWKYPTrtQOmJzOxzcwhiDQIJbE= -github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68/go.mod h1:qy5KaSXSrNqdWFS/e3wWNFXZPRDnqjX79iRhOveUpfc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mosuka/bbadger v0.1.0 h1:yc0UbkZFREZjzcNqXJp0/DPOTWld9Vq/S/MTHOb4x14= +github.com/mosuka/bbadger v0.1.0/go.mod h1:Er3F7xRxkBmVSIhqjA9CSk7ovFqfdcZDdzFBWJqfwog= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -183,7 +167,7 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= @@ 
-194,26 +178,31 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 h1:gAuD3LIrjkoOOPLlhGlZWZXztrQII9a9kT6HS5jFtSY= -github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ= -github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= @@ -221,26 +210,28 @@ github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:K github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 h1:ma5vyZGiQ7pJ6oAlz39EFxVv6uQmfD4XXgdf528hsUI= -github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19/go.mod 
h1:2/ITuYMfAxT7SEIngRdPtyFD4rfTsutLuRfmt6sWio8= -github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 h1:HOxvxvnntLiPn123Fk+twfUhCQdMDaqmb0cclArW0T0= -github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tebeka/snowball v0.3.0 h1:/vP76OjIhZrXtcmBmQgQ986B/WM95MB4tdLEuWdDgZk= +github.com/tebeka/snowball v0.3.0/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= +github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 h1:FLimlAjzuhq8loeLX7lLhKKeUgpA/4slynlNVB/Qaks= +github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 
h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -248,55 +239,26 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190823064033-3a9bac650e44/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= -golang.org/x/mobile v0.0.0-20190826170111-cafc553e1ac5/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -304,73 +266,33 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= -golang.org/x/sys 
v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190827205025-b29f5f60c37a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 h1:Phvl0+G5t5k/EUFUi0wPdUUeTL2HydMQUXHnunWgSb0= +google.golang.org/genproto v0.0.0-20190916214212-f660b8655731/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= +google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= @@ -383,8 +305,4 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 39ea9a8..6c2c41c 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -23,13 +23,10 @@ import ( "time" "github.com/blevesearch/bleve" - - 
"github.com/golang/protobuf/proto" - "github.com/blevesearch/bleve/mapping" + "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" - raftbadgerdb "github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf/index" @@ -139,31 +136,31 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - case "badger": - logStorePath := filepath.Join(s.dataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } + //case "badger": + // logStorePath := filepath.Join(s.dataDir, "raft", "log") + // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) + // err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") + // s.logger.Info("create raft stable store", 
zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) + // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } default: logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) diff --git a/manager/raft_server.go b/manager/raft_server.go index c169e31..7918433 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -29,7 +29,6 @@ import ( "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" - raftbadgerdb "github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" @@ -141,31 +140,31 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - case "badger": - logStorePath := filepath.Join(s.dataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) 
- if err != nil { - s.logger.Fatal(err.Error()) - return err - } + //case "badger": + // logStorePath := filepath.Join(s.dataDir, "raft", "log") + // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) + // err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") + // s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) + // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } + // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) + // if err != nil { + // s.logger.Fatal(err.Error()) + // return err + // } default: logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) diff --git a/protobuf/util_test.go b/protobuf/util_test.go index b94d717..9523b51 100644 --- a/protobuf/util_test.go +++ b/protobuf/util_test.go @@ -145,7 +145,7 @@ func TestMarshalAny_SearchRequest(t *testing.T) { t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) } - expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`) + expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false,"search_after":null,"search_before":null}`) actualValue := dataAny.Value if 
!bytes.Equal(expectedValue, actualValue) { t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) From d19a3c591f588ad85cdb46735d3ebde8f54fd6b3 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 17 Sep 2019 20:05:00 +0900 Subject: [PATCH 40/76] Update CHANGES.md --- CHANGES.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 24823ef..a3c0b6e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,10 +8,14 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ### Added + - Add swagger specification experimentaly #107 ### Changed +- Update go version and dependencies #109 + + ## [v0.8.0] ### Added @@ -36,6 +40,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Fix a bug for waiting to receive an indexer cluster updates from the stream #100 - Migrate to grpc-gateway #105 + ## [v0.7.1] - 2019-07-18 ### Added @@ -50,6 +55,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Remove original document #72 - Rename config package to builtins #75 + ## [v0.7.0] - 2019-07-03 ### Added @@ -60,6 +66,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Migrate grpc-middleware #68 + ## [v0.6.1] - 2019-06-21 ### Added @@ -69,6 +76,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Fix HTTP response into JSON format #64 - Update Dockerfile #62 + ## [v0.6.0] - 2019-06-19 ### Added @@ -83,6 +91,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- Update logutils (#50) - Remve KVS (#49) + ## [v0.5.0] - 2019-03-22 ### Added From 1a640b094aeb7314ddccc9b6b63dae05d53821c6 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Sun, 22 Sep 2019 22:51:36 +0900 Subject: [PATCH 41/76] Update CHANGES.md --- CHANGES.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index a3c0b6e..5da97e0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,12 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added -- Add swagger specification experimentaly #107 +### Changed + + +## [v0.8.1] + +### Added ### Changed From e1b15523dd18a8c8fc05663b27d30d3cb55adb82 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 14 Oct 2019 19:38:34 +0900 Subject: [PATCH 42/76] Refactoring (#114) --- .gitignore | 3 ++ Makefile | 127 ++++++++++++++++++++++++++++------------------------- 2 files changed, 70 insertions(+), 60 deletions(-) diff --git a/.gitignore b/.gitignore index c111936..ba765bc 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,6 @@ # Blast bin/ dist/ + +cover.out +cover.html diff --git a/Makefile b/Makefile index f017c30..ea6c4a4 100644 --- a/Makefile +++ b/Makefile @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -GOOS ?= linux -GOARCH ?= amd64 +GOOS ?= +GOARCH ?= +GO111MODULE ?= on CGO_ENABLED ?= 0 CGO_CFLAGS ?= CGO_LDFLAGS ?= BUILD_TAGS ?= -DOCKER_REPOSITORY ?= mosuka -VERSION ?= BIN_EXT ?= - -GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_CFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) GO111MODULE=on go +VERSION ?= +DOCKER_REPOSITORY ?= mosuka PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') @@ -32,6 +31,14 @@ TARGET_PACKAGES = $(shell find . 
-name 'main.go' -print0 | xargs -0 -n1 dirname GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) +ifeq ($(GOOS),) + GOOS = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$1}') +endif + +ifeq ($(GOARCH),) + GOARCH = $(shell go version | awk -F ' ' '{print $$NF}' | awk -F '/' '{print $$2}') +endif + ifeq ($(VERSION),) VERSION = latest endif @@ -41,74 +48,80 @@ ifeq ($(GOOS),windows) BIN_EXT = .exe endif +GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_CFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) GO111MODULE=$(GO111MODULE) go + .DEFAULT_GOAL := build -.PHONY: protoc -protoc: - @echo ">> generating proto3 code" +.PHONY: clean +clean: + @echo ">> cleaning binaries" + rm -rf ./bin + rm -rf ./data + rm -rf ./dist + +.PHONY: echo-env +echo-env: + @echo ">> echo environment variables" + @echo " GOOS = $(GOOS)" + @echo " GOARCH = $(GOARCH)" + @echo " GO111MODULE = $(GO111MODULE)" + @echo " CGO_ENABLED = $(CGO_ENABLED)" + @echo " CGO_CFLAGS = $(CGO_CFLAGS)" + @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" + @echo " BUILD_TAGS = $(BUILD_TAGS)" + @echo " BIN_EXT = $(BIN_EXT)" + @echo " VERSION = $(VERSION)" + @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" + @echo " PACKAGES = $(PACKAGES)" + @echo " PROTOBUFS = $(PROTOBUFS)" + @echo " TARGET_PACKAGES = $(TARGET_PACKAGES)" + @echo " LDFLAGS = $(LDFLAGS)" @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done .PHONY: format format: @echo ">> formatting code" @$(GO) fmt $(PACKAGES) +.PHONY: protoc +protoc: echo-env + @echo ">> generating proto3 code" + @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. 
$$proto_dir/*.proto || exit 1; done + .PHONY: test -test: +test: echo-env @echo ">> testing all packages" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" @$(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) +.PHONY: coverage +coverage: echo-env + @echo ">> checking coverage of all packages" + $(GO) test -coverprofile=./cover.out -tags="$(BUILD_TAGS)" $(PACKAGES) + $(GO) tool cover -html=cover.out -o cover.html + .PHONY: build -build: +build: echo-env @echo ">> building binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done .PHONY: install -install: +install: echo-env @echo ">> installing binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = $(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done .PHONY: dist -dist: +dist: echo-env @echo ">> packaging binaries" - @echo " GOOS = $(GOOS)" - @echo " GOARCH = $(GOARCH)" - @echo " CGO_ENABLED = 
$(CGO_ENABLED)" - @echo " CGO_CFLAGS = $(CGO_CFLAGS)" - @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" - @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " VERSION = $(VERSION)" mkdir -p ./dist/$(GOOS)-$(GOARCH)/bin - @for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done + for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done (cd ./dist/$(GOOS)-$(GOARCH); tar zcfv ../blast-${VERSION}.$(GOOS)-$(GOARCH).tar.gz .) .PHONY: git-tag -git-tag: +git-tag: echo-env @echo ">> tagging github" - @echo " VERSION = $(VERSION)" ifeq ($(VERSION),$(filter $(VERSION),latest master "")) @echo "please specify VERSION" else @@ -117,24 +130,18 @@ else endif .PHONY: docker-build -docker-build: +docker-build: echo-env @echo ">> building docker container image" - @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" - @echo " VERSION = $(VERSION)" docker build -t $(DOCKER_REPOSITORY)/blast:latest --build-arg VERSION=$(VERSION) . 
docker tag $(DOCKER_REPOSITORY)/blast:latest $(DOCKER_REPOSITORY)/blast:$(VERSION) .PHONY: docker-push -docker-push: +docker-push: echo-env @echo ">> pushing docker container image" - @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" - @echo " VERSION = $(VERSION)" docker push $(DOCKER_REPOSITORY)/blast:latest docker push $(DOCKER_REPOSITORY)/blast:$(VERSION) -.PHONY: clean -clean: - @echo ">> cleaning binaries" - rm -rf ./bin - rm -rf ./data - rm -rf ./dist +.PHONY: docker-pull +docker-pull: echo-env + @echo ">> pulling docker container image" + docker pull $(DOCKER_REPOSITORY):$(VERSION) From 67c6c93e949d199f33c8a38546aa35d2b04a82af Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 14 Oct 2019 21:28:25 +0900 Subject: [PATCH 43/76] Upgrade dependencies (#117) --- go.mod | 4 +++- go.sum | 38 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 789c17d..c50a8ae 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,14 @@ module github.com/mosuka/blast go 1.13 require ( - github.com/blevesearch/bleve v0.8.0 + github.com/RoaringBitmap/roaring v0.4.21 // indirect + github.com/blevesearch/bleve v0.8.1 github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 // indirect github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 // indirect + github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect diff --git a/go.sum b/go.sum index 1202765..0379a4e 100644 --- a/go.sum +++ b/go.sum @@ -6,9 +6,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= +github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= +github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1/go.mod h1:o80NPAib/LOl8Eysqppjj7kkGkqz++eqzYGlvROpDcQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -16,8 +19,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blevesearch/bleve v0.7.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= -github.com/blevesearch/bleve v0.8.0 h1:DCoCrxscCXrlzVWK92k7Vq4d28lTAFuigVmcgIX0VCo= -github.com/blevesearch/bleve v0.8.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= +github.com/blevesearch/bleve v0.8.1 h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU= +github.com/blevesearch/bleve v0.8.1/go.mod 
h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 h1:ZPImXwzC+ICkkSYlPP9mMVgQlZH24+56rIEUjVxfFnY= @@ -33,12 +36,19 @@ github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx2 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 h1:T7Qykid5GIoDEVTZL0NcbimcT2qmzjo5mNGhe8i0/5M= github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpmYDxGobTY/jTuF5zHLw0ID75yzuM= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= +github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd h1:zeuJhcG3f8eePshH3KxkNE+Xtl53pVln9MOUPMyr/1w= +github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd/go.mod 
h1:xbc8Ff/oG7h2ejd7AlwOpfd+6QZntc92ygpAOfGwcKY= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= @@ -73,6 +83,8 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -98,6 +110,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs 
v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= @@ -118,6 +132,7 @@ github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCS github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= @@ -130,6 +145,7 @@ github.com/ikawaha/kagome.ipadic v1.1.0 h1:9hzwhcklEL4Cmp+lM9HQfmDg2nhB43Fe1n9UU github.com/ikawaha/kagome.ipadic v1.1.0/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -147,10 +163,13 @@ github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217/go.mod h1:5JLTyA+23fYz/BfD5Hn736mGEZopzWtEx1pdNfnTp8k= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -170,6 +189,7 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod 
h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -197,6 +217,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= @@ -207,6 +229,12 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper 
v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -226,10 +254,12 @@ github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222/go.mod h1:ahpPrc7 github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= @@ -239,6 +269,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -265,6 +296,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -272,6 +304,8 @@ golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 00554a4ff7b4b541f20219764026b56fe395e8e2 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 14 Oct 2019 21:30:58 +0900 Subject: [PATCH 44/76] Update CHANGES.md --- CHANGES.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 5da97e0..76ac2fd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,8 +9,12 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added +- Add coverage to Makefile #114 + ### Changed +- Bump Bleve version to v0.8.1 #117 + ## [v0.8.1] From 6d497e8ad122f80ce3eee61723431c48ff4d412b Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 17 Oct 2019 19:51:36 +0900 Subject: [PATCH 45/76] Docker compose (#119) * Add docker-compose.yml * Update README.md --- README.md | 15 +++ docker-compose.yml | 221 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 docker-compose.yml diff --git a/README.md b/README.md index beda7db..b8aae7b 100644 --- a/README.md +++ b/README.md @@ -1188,6 +1188,21 @@ You can execute the command in docker container as follows: $ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 ``` +### Running cluster on Docker compose + +Also, running a Blast cluster on Docker compose. + +```bash +$ docker-compose up +$ docker-compose ps +$ ./bin/blast manager get --grpc-address=127.0.0.1:5110 /cluster | jq . 
+``` + +```bash +$ docker-compose stop +$ docker-compose rm +``` + ## Wikipedia example diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..4d358be --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,221 @@ +version: '3.4' + +networks: + blast-cluster: + driver: bridge + +services: + manager1: + container_name: manager1 + image: mosuka/blast:latest + restart: always + ports: + - 2110:2110 + - 5110:5110 + - 6110:6110 + - 8110:8110 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + command: > + blast manager start + --node-id=blast-manager1 + --node-address=manager1:2110 + --grpc-address=manager1:5110 + --grpc-gateway-address=manager1:6110 + --http-address=manager1:8110 + --data-dir=/tmp/blast/manager1 + --raft-storage-type=boltdb + --index-mapping-file=/opt/blast/example/wiki_index_mapping.json + --index-type=scorch + --index-storage-type=scorch + + indexer1: + container_name: indexer1 + image: mosuka/blast:latest + restart: always + ports: + - 2010:2010 + - 5010:5010 + - 6010:6010 + - 8010:8010 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard1 + --node-id=blast-indexer1 + --node-address=indexer1:2010 + --grpc-address=indexer1:5010 + --grpc-gateway-address=indexer1:6010 + --http-address=indexer1:8010 + --data-dir=/tmp/blast/indexer1 + --raft-storage-type=boltdb + + indexer2: + container_name: indexer2 + image: mosuka/blast:latest + restart: always + ports: + - 2020:2020 + - 5020:5020 + - 6020:6020 + - 8020:8020 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard1 + --node-id=blast-indexer2 + --node-address=indexer2:2020 + --grpc-address=indexer2:5020 + --grpc-gateway-address=indexer2:6020 + --http-address=indexer2:8020 + 
--data-dir=/tmp/blast/indexer2 + --raft-storage-type=boltdb + + indexer3: + container_name: indexer3 + image: mosuka/blast:latest + restart: always + ports: + - 2030:2030 + - 5030:5030 + - 6030:6030 + - 8030:8030 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard1 + --node-id=blast-indexer3 + --node-address=indexer3:2030 + --grpc-address=indexer3:5030 + --grpc-gateway-address=indexer3:6030 + --http-address=indexer3:8030 + --data-dir=/tmp/blast/indexer3 + --raft-storage-type=boltdb + + indexer4: + container_name: indexer4 + image: mosuka/blast:latest + restart: always + ports: + - 2040:2040 + - 5040:5040 + - 6040:6040 + - 8040:8040 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard2 + --node-id=blast-indexer4 + --node-address=indexer4:2040 + --grpc-address=indexer4:5040 + --grpc-gateway-address=indexer4:6040 + --http-address=indexer4:8040 + --data-dir=/tmp/blast/indexer4 + --raft-storage-type=boltdb + + indexer5: + container_name: indexer5 + image: mosuka/blast:latest + restart: always + ports: + - 2050:2050 + - 5050:5050 + - 6050:6050 + - 8050:8050 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard2 + --node-id=blast-indexer5 + --node-address=indexer5:2050 + --grpc-address=indexer5:5050 + --grpc-gateway-address=indexer5:6050 + --http-address=indexer5:8050 + --data-dir=/tmp/blast/indexer5 + --raft-storage-type=boltdb + + indexer6: + container_name: indexer6 + image: mosuka/blast:latest + restart: always + ports: + - 2060:2060 + - 5060:5060 + - 6060:6060 + - 8060:8060 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + 
depends_on: + - manager1 + command: > + blast indexer start + --manager-grpc-address=manager1:5110 + --shard-id=shard2 + --node-id=blast-indexer6 + --node-address=indexer6:2060 + --grpc-address=indexer6:5060 + --grpc-gateway-address=indexer6:6060 + --http-address=indexer6:8060 + --data-dir=/tmp/blast/indexer6 + --raft-storage-type=boltdb + + dispatcher1: + container_name: dispatcher1 + image: mosuka/blast:latest + restart: always + ports: + - 5210:5210 + - 6210:6210 + - 8210:8210 + networks: + - blast-cluster + volumes: + - ./example:/opt/blast/example + depends_on: + - manager1 + - indexer1 + - indexer2 + - indexer3 + - indexer4 + - indexer5 + - indexer6 + command: > + blast dispatcher start + --manager-grpc-address=manager1:5110 + --grpc-address=dispatcher1:5210 + --grpc-gateway-address=dispatcher1:6210 + --http-address=dispatcher1:8210 From 3d4d025cac3efdfe4ec48a79e91d3bf751ae8805 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Thu, 17 Oct 2019 19:52:34 +0900 Subject: [PATCH 46/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 76ac2fd..913b3ab 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added - Add coverage to Makefile #114 +- Docker compose #119 ### Changed From 6d83ecca4086d1137f3417390b7c22e26277d39c Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 18 Oct 2019 16:41:48 +0900 Subject: [PATCH 47/76] Update README.md --- README.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b8aae7b..2af73f5 100644 --- a/README.md +++ b/README.md @@ -1193,14 +1193,20 @@ $ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 Also, running a Blast cluster on Docker compose. 
```bash -$ docker-compose up +$ docker-compose up -d manager1 +$ docker-compose up -d indexer1 +$ docker-compose up -d indexer2 +$ docker-compose up -d indexer3 +$ docker-compose up -d indexer4 +$ docker-compose up -d indexer5 +$ docker-compose up -d indexer6 +$ docker-compose up -d dispatcher1 $ docker-compose ps $ ./bin/blast manager get --grpc-address=127.0.0.1:5110 /cluster | jq . ``` ```bash -$ docker-compose stop -$ docker-compose rm +$ docker-compose down ``` From 7225085c15fd258a05ceb593d29225e7f089b3d6 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 18 Oct 2019 19:50:16 +0900 Subject: [PATCH 48/76] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 2af73f5..421b837 100644 --- a/README.md +++ b/README.md @@ -1203,6 +1203,8 @@ $ docker-compose up -d indexer6 $ docker-compose up -d dispatcher1 $ docker-compose ps $ ./bin/blast manager get --grpc-address=127.0.0.1:5110 /cluster | jq . +$ ./bin/blast dispatcher index --grpc-address=127.0.0.1:5210 --file=./example/wiki_bulk_index.jsonl --bulk | jq . +$ ./bin/blast dispatcher search --grpc-address=127.0.0.1:5210 --file=./example/wiki_search_request_simple.json | jq . 
``` ```bash From 7c5a2f07118fff173ee3589b59b0721ff8da3b59 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 17:07:12 +0900 Subject: [PATCH 49/76] Delete the experimentally implemented feature for distributed search (#127) * Delete the experimentally implemented feature for distributed search * Update Changes.md --- .gitignore | 20 +- CHANGES.md | 39 - Dockerfile | 61 +- Makefile | 101 +- README.md | 1281 +++----- builtin/config_bleve.go | 5 + builtins/config_badger.go | 21 - builtins/config_bleve.go | 19 - client/grpc_client.go | 218 ++ cmd/blast/dispatcher_delete.go | 125 - cmd/blast/dispatcher_get.go | 64 - cmd/blast/dispatcher_index.go | 273 -- cmd/blast/dispatcher_node_health.go | 79 - cmd/blast/dispatcher_search.go | 119 - cmd/blast/dispatcher_start.go | 98 - cmd/blast/indexer_cluster_info.go | 56 - cmd/blast/indexer_cluster_leave.go | 68 - cmd/blast/indexer_cluster_watch.go | 83 - cmd/blast/indexer_delete.go | 125 - cmd/blast/indexer_get.go | 64 - cmd/blast/indexer_index.go | 272 -- cmd/blast/indexer_node_health.go | 79 - cmd/blast/indexer_node_info.go | 57 - cmd/blast/indexer_search.go | 119 - cmd/blast/indexer_snapshot.go | 56 - cmd/blast/indexer_start.go | 136 - cmd/blast/main.go | 1010 ------ cmd/blast/manager_cluster_info.go | 55 - cmd/blast/manager_cluster_leave.go | 63 - cmd/blast/manager_cluster_watch.go | 84 - cmd/blast/manager_delete.go | 64 - cmd/blast/manager_get.go | 60 - cmd/blast/manager_node_health.go | 79 - cmd/blast/manager_node_info.go | 55 - cmd/blast/manager_set.go | 92 - cmd/blast/manager_snapshot.go | 55 - cmd/blast/manager_start.go | 134 - cmd/blast/manager_watch.go | 74 - cmd/bulk_delete.go | 129 + cmd/bulk_index.go | 135 + cmd/cluster.go | 90 + cmd/delete.go | 89 + cmd/get.go | 99 + cmd/healthcheck.go | 100 + cmd/join.go | 104 + cmd/leave.go | 89 + cmd/mapping.go | 84 + cmd/metrics.go | 84 + cmd/node.go | 90 + cmd/root.go | 17 + cmd/search.go | 101 + cmd/set.go | 94 + cmd/snapshot.go | 81 + cmd/start.go | 211 ++ 
cmd/variables.go | 22 + cmd/version.go | 24 + cmd/watch.go | 157 + dispatcher/grpc_client.go | 118 - dispatcher/grpc_gateway.go | 353 --- dispatcher/grpc_server.go | 94 - dispatcher/grpc_service.go | 974 ------ dispatcher/http_handler.go | 79 - dispatcher/http_server.go | 69 - dispatcher/server.go | 151 - dispatcher/server_test.go | 610 ---- docker-compose.yml | 221 -- docker-entrypoint.sh | 19 - errors/errors.go | 26 +- etc/blast.yaml | 16 + .../blast_mapping.json | 6 +- example/geo_doc_2.json | 23 - example/geo_doc_3.json | 23 - example/geo_doc_4.json | 23 - example/geo_doc_5.json | 23 - example/geo_doc_6.json | 23 - example/wiki_bulk_delete.txt | 36 - example/wiki_bulk_index.jsonl | 36 - example/wiki_doc_arwiki_1.json | 9 - example/wiki_doc_bgwiki_1.json | 9 - example/wiki_doc_cawiki_1.json | 9 - example/wiki_doc_cswiki_1.json | 9 - example/wiki_doc_dawiki_1.json | 9 - example/wiki_doc_dewiki_1.json | 9 - example/wiki_doc_elwiki_1.json | 9 - example/wiki_doc_enwiki_1.json | 9 - example/wiki_doc_eswiki_1.json | 9 - example/wiki_doc_fawiki_1.json | 9 - example/wiki_doc_fiwiki_1.json | 9 - example/wiki_doc_frwiki_1.json | 9 - example/wiki_doc_gawiki_1.json | 9 - example/wiki_doc_glwiki_1.json | 9 - example/wiki_doc_guwiki_1.json | 9 - example/wiki_doc_hiwiki_1.json | 9 - example/wiki_doc_huwiki_1.json | 9 - example/wiki_doc_hywiki_1.json | 9 - example/wiki_doc_idwiki_1.json | 9 - example/wiki_doc_itwiki_1.json | 9 - example/wiki_doc_jawiki_1.json | 9 - example/wiki_doc_knwiki_1.json | 9 - example/wiki_doc_kowiki_1.json | 9 - example/wiki_doc_mlwiki_1.json | 9 - example/wiki_doc_nlwiki_1.json | 9 - example/wiki_doc_nowiki_1.json | 9 - example/wiki_doc_pswiki_1.json | 9 - example/wiki_doc_ptwiki_1.json | 9 - example/wiki_doc_rowiki_1.json | 9 - example/wiki_doc_ruwiki_1.json | 9 - example/wiki_doc_svwiki_1.json | 9 - example/wiki_doc_tawiki_1.json | 9 - example/wiki_doc_tewiki_1.json | 9 - example/wiki_doc_thwiki_1.json | 9 - example/wiki_doc_trwiki_1.json | 9 - 
example/wiki_doc_zhwiki_1.json | 9 - examples/example_bulk_delete.txt | 11 + examples/example_bulk_index.json | 11 + examples/example_doc_1.json | 8 + examples/example_mapping.json | 103 + .../example_search_request.json | 0 .../example_search_request_prefix.json | 0 .../example_search_request_simple.json | 0 examples/geo_example_bulk_index.json | 6 + .../geo_example_doc_1.json | 5 +- .../geo_example_mapping.json | 22 +- .../geo_example_search_request.json | 0 .../multiple_type_example_bulk_index.json | 36 + .../multiple_type_example_mapping.json | 72 +- go.mod | 54 +- go.sum | 249 +- hashutils/hashutils.go | 32 - http/metric.go | 100 - http/response.go | 44 - indexer/grpc_client.go | 150 - indexer/grpc_gateway.go | 376 --- indexer/grpc_server.go | 94 - indexer/grpc_service.go | 1018 ------ indexer/http_handler.go | 79 - indexer/http_server.go | 69 - indexer/index.go | 290 -- indexer/raft_fsm.go | 363 --- indexer/raft_server.go | 688 ---- indexer/server.go | 362 --- indexer/server_test.go | 2177 ------------- logutils/logger.go => log/log.go | 63 +- logutils/grpc_logger.go | 80 - logutils/http_logger.go | 90 - main.go | 15 + manager/grpc_client.go | 156 - manager/grpc_gateway.go | 172 - manager/grpc_server.go | 114 - manager/grpc_service.go | 714 ----- manager/http_handler.go | 79 - manager/http_server.go | 69 - manager/raft_fsm.go | 269 -- manager/raft_fsm_test.go | 552 ---- manager/raft_server.go | 641 ---- manager/server.go | 232 -- manager/server_test.go | 2774 ----------------- .../indexutils.go => mapping/mapping.go | 26 +- maputils/error.go | 21 - maputils/maputils.go | 302 -- maputils/maputils_test.go | 679 ---- marshaler/marshaler.go | 186 ++ marshaler/util.go | 69 + marshaler/util_test.go | 109 + metric/metric.go | 895 ++++++ protobuf/distribute/distribute.pb.go | 945 ------ protobuf/distribute/distribute.pb.gw.go | 443 --- protobuf/distribute/distribute.proto | 135 - protobuf/distribute/distribute.swagger.json | 362 --- protobuf/index.pb.go | 1913 
++++++++++++ protobuf/index.pb.gw.go | 1276 ++++++++ protobuf/index.proto | 223 ++ protobuf/index/index.go | 75 - protobuf/index/index.pb.go | 2051 ------------ protobuf/index/index.pb.gw.go | 510 --- protobuf/index/index.proto | 241 -- protobuf/index/index.swagger.json | 557 ---- protobuf/management/management.pb.go | 1649 ---------- protobuf/management/management.pb.gw.go | 379 --- protobuf/management/management.proto | 203 -- protobuf/management/management.swagger.json | 409 --- protobuf/util.go | 57 - protobuf/util_test.go | 307 -- registry/type.go | 56 +- server/grpc_gateway.go | 129 + server/grpc_server.go | 129 + server/grpc_service.go | 540 ++++ server/raft_fsm.go | 400 +++ server/raft_fsm_test.go | 743 +++++ server/raft_server.go | 857 +++++ server/raft_server_test.go | 1536 +++++++++ sortutils/sort.go | 49 - storage/index.go | 269 ++ storage/index_test.go | 341 ++ strutils/strutils.go | 49 - testutils/testutils.go | 43 - util/temp.go | 29 + version/version.go | 14 - 198 files changed, 12640 insertions(+), 29515 deletions(-) create mode 100644 builtin/config_bleve.go delete mode 100644 builtins/config_badger.go delete mode 100644 builtins/config_bleve.go create mode 100644 client/grpc_client.go delete mode 100644 cmd/blast/dispatcher_delete.go delete mode 100644 cmd/blast/dispatcher_get.go delete mode 100644 cmd/blast/dispatcher_index.go delete mode 100644 cmd/blast/dispatcher_node_health.go delete mode 100644 cmd/blast/dispatcher_search.go delete mode 100644 cmd/blast/dispatcher_start.go delete mode 100644 cmd/blast/indexer_cluster_info.go delete mode 100644 cmd/blast/indexer_cluster_leave.go delete mode 100644 cmd/blast/indexer_cluster_watch.go delete mode 100644 cmd/blast/indexer_delete.go delete mode 100644 cmd/blast/indexer_get.go delete mode 100644 cmd/blast/indexer_index.go delete mode 100644 cmd/blast/indexer_node_health.go delete mode 100644 cmd/blast/indexer_node_info.go delete mode 100644 cmd/blast/indexer_search.go delete mode 100644 
cmd/blast/indexer_snapshot.go delete mode 100644 cmd/blast/indexer_start.go delete mode 100644 cmd/blast/main.go delete mode 100644 cmd/blast/manager_cluster_info.go delete mode 100644 cmd/blast/manager_cluster_leave.go delete mode 100644 cmd/blast/manager_cluster_watch.go delete mode 100644 cmd/blast/manager_delete.go delete mode 100644 cmd/blast/manager_get.go delete mode 100644 cmd/blast/manager_node_health.go delete mode 100644 cmd/blast/manager_node_info.go delete mode 100644 cmd/blast/manager_set.go delete mode 100644 cmd/blast/manager_snapshot.go delete mode 100644 cmd/blast/manager_start.go delete mode 100644 cmd/blast/manager_watch.go create mode 100644 cmd/bulk_delete.go create mode 100644 cmd/bulk_index.go create mode 100644 cmd/cluster.go create mode 100644 cmd/delete.go create mode 100644 cmd/get.go create mode 100644 cmd/healthcheck.go create mode 100644 cmd/join.go create mode 100644 cmd/leave.go create mode 100644 cmd/mapping.go create mode 100644 cmd/metrics.go create mode 100644 cmd/node.go create mode 100644 cmd/root.go create mode 100644 cmd/search.go create mode 100644 cmd/set.go create mode 100644 cmd/snapshot.go create mode 100644 cmd/start.go create mode 100644 cmd/variables.go create mode 100644 cmd/version.go create mode 100644 cmd/watch.go delete mode 100644 dispatcher/grpc_client.go delete mode 100644 dispatcher/grpc_gateway.go delete mode 100644 dispatcher/grpc_server.go delete mode 100644 dispatcher/grpc_service.go delete mode 100644 dispatcher/http_handler.go delete mode 100644 dispatcher/http_server.go delete mode 100644 dispatcher/server.go delete mode 100644 dispatcher/server_test.go delete mode 100644 docker-compose.yml delete mode 100755 docker-entrypoint.sh create mode 100644 etc/blast.yaml rename example/enwiki_index_mapping.json => etc/blast_mapping.json (97%) delete mode 100644 example/geo_doc_2.json delete mode 100644 example/geo_doc_3.json delete mode 100644 example/geo_doc_4.json delete mode 100644 example/geo_doc_5.json 
delete mode 100644 example/geo_doc_6.json delete mode 100644 example/wiki_bulk_delete.txt delete mode 100644 example/wiki_bulk_index.jsonl delete mode 100644 example/wiki_doc_arwiki_1.json delete mode 100644 example/wiki_doc_bgwiki_1.json delete mode 100644 example/wiki_doc_cawiki_1.json delete mode 100644 example/wiki_doc_cswiki_1.json delete mode 100644 example/wiki_doc_dawiki_1.json delete mode 100644 example/wiki_doc_dewiki_1.json delete mode 100644 example/wiki_doc_elwiki_1.json delete mode 100644 example/wiki_doc_enwiki_1.json delete mode 100644 example/wiki_doc_eswiki_1.json delete mode 100644 example/wiki_doc_fawiki_1.json delete mode 100644 example/wiki_doc_fiwiki_1.json delete mode 100644 example/wiki_doc_frwiki_1.json delete mode 100644 example/wiki_doc_gawiki_1.json delete mode 100644 example/wiki_doc_glwiki_1.json delete mode 100644 example/wiki_doc_guwiki_1.json delete mode 100644 example/wiki_doc_hiwiki_1.json delete mode 100644 example/wiki_doc_huwiki_1.json delete mode 100644 example/wiki_doc_hywiki_1.json delete mode 100644 example/wiki_doc_idwiki_1.json delete mode 100644 example/wiki_doc_itwiki_1.json delete mode 100644 example/wiki_doc_jawiki_1.json delete mode 100644 example/wiki_doc_knwiki_1.json delete mode 100644 example/wiki_doc_kowiki_1.json delete mode 100644 example/wiki_doc_mlwiki_1.json delete mode 100644 example/wiki_doc_nlwiki_1.json delete mode 100644 example/wiki_doc_nowiki_1.json delete mode 100644 example/wiki_doc_pswiki_1.json delete mode 100644 example/wiki_doc_ptwiki_1.json delete mode 100644 example/wiki_doc_rowiki_1.json delete mode 100644 example/wiki_doc_ruwiki_1.json delete mode 100644 example/wiki_doc_svwiki_1.json delete mode 100644 example/wiki_doc_tawiki_1.json delete mode 100644 example/wiki_doc_tewiki_1.json delete mode 100644 example/wiki_doc_thwiki_1.json delete mode 100644 example/wiki_doc_trwiki_1.json delete mode 100644 example/wiki_doc_zhwiki_1.json create mode 100644 examples/example_bulk_delete.txt create 
mode 100644 examples/example_bulk_index.json create mode 100644 examples/example_doc_1.json create mode 100644 examples/example_mapping.json rename example/wiki_search_request.json => examples/example_search_request.json (100%) rename example/wiki_search_request_prefix.json => examples/example_search_request_prefix.json (100%) rename example/wiki_search_request_simple.json => examples/example_search_request_simple.json (100%) create mode 100644 examples/geo_example_bulk_index.json rename example/geo_doc_1.json => examples/geo_example_doc_1.json (89%) rename example/geo_index_mapping.json => examples/geo_example_mapping.json (60%) rename example/geo_search_request.json => examples/geo_example_search_request.json (100%) create mode 100644 examples/multiple_type_example_bulk_index.json rename example/wiki_index_mapping.json => examples/multiple_type_example_mapping.json (99%) delete mode 100644 hashutils/hashutils.go delete mode 100644 http/metric.go delete mode 100644 http/response.go delete mode 100644 indexer/grpc_client.go delete mode 100644 indexer/grpc_gateway.go delete mode 100644 indexer/grpc_server.go delete mode 100644 indexer/grpc_service.go delete mode 100644 indexer/http_handler.go delete mode 100644 indexer/http_server.go delete mode 100644 indexer/index.go delete mode 100644 indexer/raft_fsm.go delete mode 100644 indexer/raft_server.go delete mode 100644 indexer/server.go delete mode 100644 indexer/server_test.go rename logutils/logger.go => log/log.go (54%) delete mode 100644 logutils/grpc_logger.go delete mode 100644 logutils/http_logger.go create mode 100644 main.go delete mode 100644 manager/grpc_client.go delete mode 100644 manager/grpc_gateway.go delete mode 100644 manager/grpc_server.go delete mode 100644 manager/grpc_service.go delete mode 100644 manager/http_handler.go delete mode 100644 manager/http_server.go delete mode 100644 manager/raft_fsm.go delete mode 100644 manager/raft_fsm_test.go delete mode 100644 manager/raft_server.go delete mode 
100644 manager/server.go delete mode 100644 manager/server_test.go rename indexutils/indexutils.go => mapping/mapping.go (63%) delete mode 100644 maputils/error.go delete mode 100644 maputils/maputils.go delete mode 100644 maputils/maputils_test.go create mode 100644 marshaler/marshaler.go create mode 100644 marshaler/util.go create mode 100644 marshaler/util_test.go create mode 100644 metric/metric.go delete mode 100644 protobuf/distribute/distribute.pb.go delete mode 100644 protobuf/distribute/distribute.pb.gw.go delete mode 100644 protobuf/distribute/distribute.proto delete mode 100644 protobuf/distribute/distribute.swagger.json create mode 100644 protobuf/index.pb.go create mode 100644 protobuf/index.pb.gw.go create mode 100644 protobuf/index.proto delete mode 100644 protobuf/index/index.go delete mode 100644 protobuf/index/index.pb.go delete mode 100644 protobuf/index/index.pb.gw.go delete mode 100644 protobuf/index/index.proto delete mode 100644 protobuf/index/index.swagger.json delete mode 100644 protobuf/management/management.pb.go delete mode 100644 protobuf/management/management.pb.gw.go delete mode 100644 protobuf/management/management.proto delete mode 100644 protobuf/management/management.swagger.json delete mode 100644 protobuf/util.go delete mode 100644 protobuf/util_test.go create mode 100644 server/grpc_gateway.go create mode 100644 server/grpc_server.go create mode 100644 server/grpc_service.go create mode 100644 server/raft_fsm.go create mode 100644 server/raft_fsm_test.go create mode 100644 server/raft_server.go create mode 100644 server/raft_server_test.go delete mode 100644 sortutils/sort.go create mode 100644 storage/index.go create mode 100644 storage/index_test.go delete mode 100644 strutils/strutils.go delete mode 100644 testutils/testutils.go create mode 100644 util/temp.go diff --git a/.gitignore b/.gitignore index ba765bc..ee0c04c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,29 +1,15 @@ -# Copyright (c) 2017 Minoru Osuka -# -# 
Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - .DS_Store -# Eclipse .classpath .project -# Gogland .idea/ -# Blast bin/ dist/ +*.pem +*.csr + cover.out cover.html diff --git a/CHANGES.md b/CHANGES.md index 913b3ab..b660b65 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,32 +7,19 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] -### Added - - Add coverage to Makefile #114 - Docker compose #119 - -### Changed - - Bump Bleve version to v0.8.1 #117 ## [v0.8.1] -### Added - -### Changed - - Update go version and dependencies #109 ## [v0.8.0] -### Added - Add swagger specification experimentaly #107 - -### Changed - - New CLI #82 - Split protobuf into components #84 - Change subcommands #85 @@ -53,14 +40,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [v0.7.1] - 2019-07-18 -### Added - - Add raft-badger #69 - Add raft-storage-type flag #73 - Add gRPC access logger #74 - -### Changed - - Improve indexing performance #71 - Remove original document #72 - Rename config package to builtins #75 @@ -68,35 +50,21 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [v0.7.0] - 2019-07-03 -### Added - - Add GEO search example #65 - -### Changed - - Migrate grpc-middleware #68 ## [v0.6.1] - 2019-06-21 -### Added - -### Changed - - Fix HTTP response into JSON format #64 - Update Dockerfile #62 ## [v0.6.0] - 2019-06-19 -### Added - - Add federated search #30 - Add cluster manager (#48) - Add KVS HTTP handlers #46 - -### Changed - - Update http logger #51 - Update logutils (#50) - Remve KVS (#49) @@ -104,8 +72,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [v0.5.0] - 2019-03-22 -### Added - - Support bulk update #41 - Support Badger #38 - Add index stats #37 @@ -114,9 +80,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Add logging #33 - Add CHANGES.md #29 - Add error handling for server startup #28. - -### Changed - - Fixed some badger bugs #40 - Restructure store package #36 - Update examples #32 @@ -125,6 +88,4 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [v0.4.0] - 2019-03-14 -### Changed - - Code refactoring. diff --git a/Dockerfile b/Dockerfile index 4da3182..f3f3173 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,4 @@ -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM golang:1.13.0-stretch +FROM golang:1.14.1-stretch ARG VERSION @@ -23,15 +9,15 @@ COPY . 
${GOPATH}/src/github.com/mosuka/blast RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ echo "deb-src http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - git \ - golang \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev \ - gcc-4.8 \ - g++-4.8 \ - build-essential && \ + git \ + golang \ + libicu-dev \ + libstemmer-dev \ + gcc-4.8 \ + g++-4.8 \ + build-essential && \ apt-get clean && \ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ @@ -44,30 +30,29 @@ RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> ./compile_libs.sh && \ cp *.so /usr/local/lib && \ cd ${GOPATH}/src/github.com/mosuka/blast && \ - make \ - GOOS=linux \ - GOARCH=amd64 \ - CGO_ENABLED=1 \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - VERSION="${VERSION}" \ - build + make GOOS=linux \ + GOARCH=amd64 \ + CGO_ENABLED=1 \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + VERSION="${VERSION}" \ + build FROM debian:stretch-slim MAINTAINER Minoru Osuka "minoru.osuka@gmail.com" RUN apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev && \ - apt-get clean + libicu-dev \ + libstemmer-dev && \ + apt-get clean && \ + rm -rf /var/cache/apk/* COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/lib/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ -COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 2000 5000 6000 8000 +EXPOSE 7000 8000 9000 -ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] -CMD [ "blast", "--help" ] +ENTRYPOINT [ "/usr/bin/blast" ] +CMD [ "start" ] diff --git a/Makefile b/Makefile index ea6c4a4..a47d17d 100644 --- a/Makefile +++ b/Makefile @@ -1,33 +1,19 @@ -# 
Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - GOOS ?= GOARCH ?= GO111MODULE ?= on CGO_ENABLED ?= 0 CGO_CFLAGS ?= CGO_LDFLAGS ?= -BUILD_TAGS ?= -BIN_EXT ?= +BUILD_TAGS ?= kagome VERSION ?= +BIN_EXT ?= DOCKER_REPOSITORY ?= mosuka PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) -TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) +TARGET_PACKAGES = $(shell find $(CURDIR) -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) @@ -42,7 +28,7 @@ endif ifeq ($(VERSION),) VERSION = latest endif -LDFLAGS = -ldflags "-s -w -X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" +LDFLAGS = -ldflags "-X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" ifeq ($(GOOS),windows) BIN_EXT = .exe @@ -52,16 +38,9 @@ GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_ .DEFAULT_GOAL := build -.PHONY: clean -clean: - @echo ">> cleaning binaries" - rm -rf ./bin - rm -rf ./data - rm -rf ./dist - -.PHONY: echo-env -echo-env: - @echo ">> echo environment variables" +.PHONY: show-env +show-env: + @echo ">> show env" @echo " GOOS = $(GOOS)" @echo " GOARCH = $(GOARCH)" @echo " GO111MODULE = $(GO111MODULE)" @@ -69,58 +48,68 @@ 
echo-env: @echo " CGO_CFLAGS = $(CGO_CFLAGS)" @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " BIN_EXT = $(BIN_EXT)" @echo " VERSION = $(VERSION)" + @echo " BIN_EXT = $(BIN_EXT)" @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" + @echo " LDFLAGS = $(LDFLAGS)" @echo " PACKAGES = $(PACKAGES)" @echo " PROTOBUFS = $(PROTOBUFS)" @echo " TARGET_PACKAGES = $(TARGET_PACKAGES)" - @echo " LDFLAGS = $(LDFLAGS)" @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" -.PHONY: format -format: - @echo ">> formatting code" - @$(GO) fmt $(PACKAGES) - .PHONY: protoc -protoc: echo-env +protoc: show-env @echo ">> generating proto3 code" - @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + +.PHONY: format +format: show-env + @echo ">> formatting code" + $(GO) fmt $(PACKAGES) .PHONY: test -test: echo-env +test: show-env @echo ">> testing all packages" - @$(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) + $(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) .PHONY: coverage -coverage: echo-env +coverage: show-env @echo ">> checking coverage of all packages" $(GO) test -coverprofile=./cover.out -tags="$(BUILD_TAGS)" $(PACKAGES) $(GO) tool cover -html=cover.out -o cover.html +.PHONY: clean +clean: show-env + @echo ">> cleaning binaries" + rm -rf ./bin + rm -rf ./data + rm -rf ./dist + .PHONY: build -build: echo-env +build: show-env @echo ">> building binaries" for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done .PHONY: install -install: echo-env +install: show-env @echo ">> installing binaries" for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done .PHONY: dist -dist: echo-env +dist: show-env @echo ">> packaging binaries" mkdir -p ./dist/$(GOOS)-$(GOARCH)/bin for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done (cd ./dist/$(GOOS)-$(GOARCH); tar zcfv ../blast-${VERSION}.$(GOOS)-$(GOARCH).tar.gz .) 
-.PHONY: git-tag -git-tag: echo-env +.PHONY: list-tag +list-tag: + @echo ">> listing github tags" + git tag -l --sort=-v:refname + +.PHONY: tag +tag: show-env @echo ">> tagging github" ifeq ($(VERSION),$(filter $(VERSION),latest master "")) @echo "please specify VERSION" @@ -130,18 +119,22 @@ else endif .PHONY: docker-build -docker-build: echo-env +docker-build: show-env @echo ">> building docker container image" docker build -t $(DOCKER_REPOSITORY)/blast:latest --build-arg VERSION=$(VERSION) . docker tag $(DOCKER_REPOSITORY)/blast:latest $(DOCKER_REPOSITORY)/blast:$(VERSION) .PHONY: docker-push -docker-push: echo-env +docker-push: show-env @echo ">> pushing docker container image" docker push $(DOCKER_REPOSITORY)/blast:latest docker push $(DOCKER_REPOSITORY)/blast:$(VERSION) -.PHONY: docker-pull -docker-pull: echo-env - @echo ">> pulling docker container image" - docker pull $(DOCKER_REPOSITORY):$(VERSION) +.PHONY: docker-clean +docker-clean: show-env + docker rmi -f $(shell docker images --filter "dangling=true" -q --no-trunc) + +.PHONY: cert +cert: show-env + @echo ">> generating certification" + openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' diff --git a/README.md b/README.md index 421b837..0d10c58 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,3 @@ - - # Blast Blast is a full-text search and indexing server written in [Go](https://golang.org) built on top of [Bleve](http://www.blevesearch.com). 
@@ -31,16 +12,14 @@ Blast makes it easy for programmers to develop search applications with advanced - Faceted search - Spatial/Geospatial search - Search result highlighting -- Distributed search/indexing - Index replication - Bringing up cluster -- Cluster Federation - An easy-to-use HTTP API - CLI is available - Docker container image is available -## Installing dependencies +## Install build dependencies Blast requires some C/C++ libraries if you need to enable cld2, icu, libstemmer or leveldb. The following sections are instructions for satisfying dependencies on particular platforms. @@ -90,63 +69,73 @@ $ sudo cp *.so /usr/local/lib ``` -## Building Blast +## Build -When you satisfied dependencies, let's build Blast for Linux as following: +Building Blast as following: ```bash $ mkdir -p ${GOPATH}/src/github.com/mosuka $ cd ${GOPATH}/src/github.com/mosuka $ git clone https://github.com/mosuka/blast.git $ cd blast -$ make build +$ make +``` + +If you omit `GOOS` or `GOARCH`, it will build the binary of the platform you are using. +If you want to specify the target platform, please set `GOOS` and `GOARCH` environment variables. + +### Linux + +```bash +$ make GOOS=linux build ``` -If you want to build for other platform, set `GOOS`, `GOARCH` environment variables. For example, build for macOS like following: +### macOS ```bash -$ make \ - GOOS=darwin \ - build +$ make GOOS=darwin build ``` -Blast supports some [Bleve Extensions (blevex)](https://github.com/blevesearch/blevex). If you want to build with them, please set `CGO_LDFLAGS`, `CGO_CFLAGS`, `CGO_ENABLED` and `BUILD_TAGS`. For example, build LevelDB to be available for index storage as follows: +### Windows ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS=leveldb \ - CGO_ENABLED=1 \ - build +$ make GOOS=windows build ``` -You can enable all the Bleve extensions supported by Blast as follows: +## Build with extensions + +Blast supports some Bleve Extensions (blevex). 
If you want to build with them, please set CGO_LDFLAGS, CGO_CFLAGS, CGO_ENABLED and BUILD_TAGS. For example, build with the ICU extension enabled as follows: + +```bash +$ make GOOS=linux \ + BUILD_TAGS=icu \ + CGO_ENABLED=1 \ + build +``` -### Linux +### Linux ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - build +$ make GOOS=linux \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + build ``` ### macOS ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - build +$ make GOOS=darwin \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ + CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ + build ``` -### Build flags +### Build flags -Please refer to the following table for details of Bleve Extensions: +Refer to the following table for the build flags of the supported Bleve extensions: | BUILD_TAGS | CGO_ENABLED | Description | | ---------- | ----------- | ----------- | @@ -154,13 +143,11 @@ Please refer to the following table for details of Bleve Extensions: | kagome | 0 | Enable Japanese Language Analyser | | icu | 1 | Enable ICU Tokenizer, Thai Language Analyser | | libstemmer | 1 | Enable Language Stemmer (Danish, German, English, Spanish, Finnish, French, Hungarian, Italian, Dutch, Norwegian, Portuguese, Romanian, Russian, Swedish, Turkish) | -| cznicb | 0 | Enable cznicb KV store | -| leveldb | 1 | Enable LevelDB | -| badger | 0 | Enable Badger (This feature is considered experimental) | -If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Installing dependencies section above. +If you want to enable the feature whose `CGO_ENABLED` is `1`, please install it referring to the Install build dependencies section above. 
-### Binaries + +## Binary You can see the binary file when build successful like so: @@ -170,718 +157,365 @@ blast ``` -## Testing Blast +## Test If you want to test your changes, run command like following: ```bash -$ make \ - test +$ make test ``` -You can test with all the Bleve extensions supported by Blast as follows: +If you want to specify the target platform, set `GOOS` and `GOARCH` environment variables in the same way as the build. -### Linux -```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - test -``` +## Package -### macOS +To create a distribution package, run the following command: ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - test +$ make dist ``` -## Packaging Blast +## Configure -### Linux +Blast can change its startup options with configuration files, environment variables, and command line arguments. +Refer to the following table for the options that can be configured. -```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - dist -``` +| CLI Flag | Environment variable | Configuration File | Description | +| --- | --- | --- | --- | +| --config-file | - | - | config file. 
if omitted, blast.yaml in /etc and home directory will be searched | +| --id | BLAST_ID | id | node ID | +| --raft-address | BLAST_RAFT_ADDRESS | raft_address | Raft server listen address | +| --grpc-address | BLAST_GRPC_ADDRESS | grpc_address | gRPC server listen address | +| --http-address | BLAST_HTTP_ADDRESS | http_address | HTTP server listen address | +| --data-directory | BLAST_DATA_DIRECTORY | data_directory | data directory which store the index and Raft logs | +| --mapping-file | BLAST_MAPPING_FILE | mapping_file | path to the index mapping file | +| --peer-grpc-address | BLAST_PEER_GRPC_ADDRESS | peer_grpc_address | listen address of the existing gRPC server in the joining cluster | +| --certificate-file | BLAST_CERTIFICATE_FILE | certificate_file | path to the client server TLS certificate file | +| --key-file | BLAST_KEY_FILE | key_file | path to the client server TLS key file | +| --common-name | BLAST_COMMON_NAME | common_name | certificate common name | +| --log-level | BLAST_LOG_LEVEL | log_level | log level | +| --log-file | BLAST_LOG_FILE | log_file | log file | +| --log-max-size | BLAST_LOG_MAX_SIZE | log_max_size | max size of a log file in megabytes | +| --log-max-backups | BLAST_LOG_MAX_BACKUPS | log_max_backups | max backup count of log files | +| --log-max-age | BLAST_LOG_MAX_AGE | log_max_age | max age of a log file in days | +| --log-compress | BLAST_LOG_COMPRESS | log_compress | compress a log file | -### macOS -```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - dist -``` +## Start +Starting server is easy as follows: -## Starting Blast in standalone mode - -![standalone](https://user-images.githubusercontent.com/970948/59768879-138f5180-92e0-11e9-8b33-c7b1a93e0893.png) +```bash +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + 
--grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./examples/example_mapping.json +``` -Running a Blast in standalone mode is easy. Start a indexer like so: +You can get the node information with the following command: ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast node | jq . ``` -Please refer to following document for details of index mapping: -- http://blevesearch.com/docs/Terminology/ -- http://blevesearch.com/docs/Text-Analysis/ -- http://blevesearch.com/docs/Index-Mapping/ -- https://github.com/blevesearch/bleve/blob/master/mapping/index.go#L43 - -You can check the node with the following command: +or the following URL: ```bash -$ ./bin/blast indexer node info --grpc-address=:5000 | jq . +$ curl -X GET http://localhost:8000/v1/node | jq . ``` -You can see the result in JSON format. The result of the above command is: +The result of the above command is: ```json { "node": { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, + "raft_address": ":7000", "metadata": { - "grpc_address": ":5000", - "grpc_gateway_address": ":6000", + "grpc_address": ":9000", "http_address": ":8000" - } + }, + "state": "Leader" } } ``` -You can now put, get, search and delete the documents via CLI. - -### Indexing a document via CLI +## Health check -For document indexing, execute the following command: +You can check the health status of the node. ```bash -$ ./bin/blast indexer index --grpc-address=:5000 enwiki_1 ' -{ - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" - } -} -' | jq . +$ ./bin/blast healthcheck | jq . ``` -or +It also provides the following REST APIs. -```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . -``` +### Liveness probe -You can see the result in JSON format. The result of the above command is: +This endpoint always returns 200 and should be used to check server health. -```json -{} +```bash +$ curl -X GET http://localhost:8000/v1/liveness_check | jq . ``` -### Getting a document via CLI +### Readiness probe -Getting a document is as following: +This endpoint returns 200 when server is ready to serve traffic (i.e. respond to queries). ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . +$ curl -X GET http://localhost:8000/v1/readiness_check | jq . ``` -You can see the result in JSON format. The result of the above command is: +## Put a document -```json +To put a document, execute the following command: + +```bash +$ ./bin/blast set 1 ' { "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "_type": "example" } } +' | jq . ``` -### Searching documents via CLI - -Searching documents is as like following: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' --data-binary ' { - "search_result": { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } - ] - }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, - "hits": [ - { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, - { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null - }, - { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null - } - ] - }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] - } - }, - "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" - ], - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } - } - ], - "total_hits": 1, - "max_score": 0.09703538256409851, - "took": 122105, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] - }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 - } - ] - } - } + "fields": { + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "example" } } +' | jq . 
``` -Please refer to following document for details of search request and result: -- http://blevesearch.com/docs/Query/ -- http://blevesearch.com/docs/Query-String-Query/ -- http://blevesearch.com/docs/Sorting/ -- https://github.com/blevesearch/bleve/blob/master/search.go#L267 -- https://github.com/blevesearch/bleve/blob/master/search.go#L443 - -### Deleting a document via CLI - -Deleting a document is as following: +or ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 enwiki_1 -``` - -You can see the result in JSON format. The result of the above command is: - -```json -{} +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' -H "Content-Type: application/json" --data-binary @./examples/example_doc_1.json ``` -### Indexing documents in bulk via CLI +## Get a document -Indexing documents in bulk, run the following command: +To get a document, execute the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk | jq . +$ ./bin/blast get 1 | jq . ``` -You can see the result in JSON format. The result of the above command is: - -```json -{ - "count": 36 -} -``` - -### Deleting documents in bulk via CLI - -Deleting documents in bulk, run the following command: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt | jq . +$ curl -X GET 'http://127.0.0.1:8000/v1/documents/1' | jq . ``` -You can see the result in JSON format. The result of the above command is: +You can see the result. The result of the above command is: ```json { - "count": 36 + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } } ``` +## Search documents -## Using HTTP REST API - -Also you can do above commands via HTTP REST API that listened port 5002. - -### Indexing a document via HTTP REST API - -Indexing a document via HTTP is as following: +To search documents, execute the following command: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' +$ ./bin/blast search ' { - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] } } ' | jq . ``` -or +or, you can use the RESTful API as follows: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json -{} -``` - -### Getting a document via HTTP REST API - -Getting a document via HTTP is as following: - -```bash -$ curl -X GET 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . -``` - -You can see the result in JSON format. The result of the above command is: - -```json +$ curl -X POST 'http://127.0.0.1:8000/v1/search' --data-binary ' { - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] } } +' | jq . ``` -### Searching documents via HTTP REST API - -Searching documents via HTTP is as following: - -```bash -$ curl -X POST 'http://127.0.0.1:6000/v1/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json | jq . -``` - -You can see the result in JSON format. The result of the above command is: +You can see the result. 
The result of the above command is: ```json { "search_result": { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } - ] - }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, + "facets": null, "hits": [ { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, - { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null - }, - { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null - } - ] - }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] - } - }, - "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" - ], "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } + "title": "Search engine (computing)" + }, + "id": "1", + "index": "/tmp/blast/node1/index", + "score": 0.09703538256409851, + "sort": [ + "_score" + ] } ], - "total_hits": 1, "max_score": 0.09703538256409851, - "took": 323568, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] + "request": { + "explain": false, + "facets": null, + "fields": [ + "*" + ], + "from": 0, + "highlight": null, + "includeLocations": false, + "query": { + "query": "+_all:search" }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 - } - ] - } - } + "search_after": null, + "search_before": null, + "size": 10, + "sort": [ + "-_score" + ] + }, + "status": { + "failed": 0, + "successful": 1, + "total": 1 + }, + "took": 171880, + "total_hits": 1 } } ``` -### Deleting a document via HTTP REST API +## Delete 
a document -Deleting a document via HTTP is as following: +To delete a document, execute the following command: ```bash -$ curl -X DELETE 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . +$ ./bin/blast delete 1 ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json -{} +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents/1' ``` -### Indexing documents in bulk via HTTP REST API +## Index documents in bulk -Indexing documents in bulk via HTTP is as following: +To index documents in bulk, execute the following command: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl | jq . +$ ./bin/blast bulk-index --file ./examples/example_bulk_index.json ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json -{ - "count": 36 -} +```bash +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: application/x-ndjson" --data-binary @./examples/example_bulk_index.json ``` -### Deleting documents in bulk via HTTP REST API +## Delete documents in bulk -Deleting documents in bulk via HTTP is as following: +To delete documents in bulk, execute the following command: ```bash -$ curl -X DELETE 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt | jq . +$ ./bin/blast bulk-delete --file ./examples/example_bulk_delete.txt ``` -You can see the result in JSON format. 
The result of the above command is: +or, you can use the RESTful API as follows: -```json -{ - "count": 36 -} +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: text/plain" --data-binary @./examples/example_bulk_delete.txt ``` +## Bringing up a cluster -## Starting Blast in cluster mode - -![cluster](https://user-images.githubusercontent.com/970948/59768677-bf846d00-92df-11e9-8a70-92496ff55ce7.png) - -Blast can easily bring up a cluster. Running a Blast in standalone is not fault tolerant. If you need to improve fault tolerance, start two more indexers as follows: - -First of all, start a indexer in standalone. +Blast is easy to bring up the cluster. the node is already running, but that is not fault tolerant. If you need to increase the fault tolerance, bring up 2 more data nodes like so: ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./examples/example_mapping.json ``` -Then, start two more indexers. 
- ```bash -$ ./bin/blast indexer start \ - --peer-grpc-address=:5000 \ - --grpc-address=:5010 \ - --grpc-gateway-address=:6010 \ - --http-address=:8010 \ - --node-id=indexer2 \ - --node-address=:2010 \ - --data-dir=/tmp/blast/indexer2 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --peer-grpc-address=:5000 \ - --grpc-address=:5020 \ - --grpc-gateway-address=:6020 \ - --http-address=:8020 \ - --node-id=indexer3 \ - --node-address=:2020 \ - --data-dir=/tmp/blast/indexer3 \ - --raft-storage-type=boltdb +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./examples/example_mapping.json ``` + _Above example shows each Blast node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ -This instructs each new node to join an existing node, specifying `--peer-addr=:5001`. Each node recognizes the joining clusters when started. -So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the peers in the cluster with the following command: +This instructs each new node to join an existing node, each node recognizes the joining clusters when started. +So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the cluster with the following command: ```bash -$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +$ ./bin/blast cluster | jq . ``` -or +or, you can use the RESTful API as follows: ```bash -$ curl -X GET 'http://127.0.0.1:6000/v1/cluster/status' -H 'Content-Type: application/json' | jq . +$ curl -X GET 'http://127.0.0.1:8000/v1/cluster' | jq . ``` You can see the result in JSON format. The result of the above command is: @@ -890,249 +524,121 @@ You can see the result in JSON format. 
The result of the above command is: { "cluster": { "nodes": { - "indexer1": { - "id": "indexer1", - "bind_address": ":2000", - "state": 1, + "node1": { + "raft_address": ":7000", "metadata": { - "grpc_address": ":5000", - "grpc_gateway_address": ":6000", + "grpc_address": ":9000", "http_address": ":8000" - } + }, + "state": "Leader" }, - "indexer2": { - "id": "indexer2", - "bind_address": ":2010", - "state": 1, + "node2": { + "raft_address": ":7001", "metadata": { - "grpc_address": ":5010", - "grpc_gateway_address": ":6010", - "http_address": ":8010" - } + "grpc_address": ":9001", + "http_address": ":8001" + }, + "state": "Follower" }, - "indexer3": { - "id": "indexer3", - "bind_address": ":2020", - "state": 3, + "node3": { + "raft_address": ":7002", "metadata": { - "grpc_address": ":5020", - "grpc_gateway_address": ":6020", - "http_address": ":8020" - } + "grpc_address": ":9002", + "http_address": ":8002" + }, + "state": "Follower" } - } + }, + "leader": "node1" } } ``` Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, data loss is inevitable, so avoid deploying single nodes. -The following command indexes documents to any node in the cluster: +The above example, the node joins to the cluster at startup, but you can also join the node that already started on standalone mode to the cluster later, as follows: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . +$ ./bin/blast join --grpc-address=:9000 node2 127.0.0.1:9001 ``` -So, you can get the document from the node specified by the above command as follows: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json +$ curl -X PUT 'http://127.0.0.1:8000/v1/cluster/node2' --data-binary ' { - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "raft_address": ":7001", + "metadata": { + "grpc_address": ":9001", + "http_address": ":8001" } } +' ``` -You can also get the same document from other nodes in the cluster as follows: +To remove a node from the cluster, execute the following command: ```bash -$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 | jq . -$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 | jq . +$ ./bin/blast leave --grpc-address=:9000 node2 ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/cluster/node2' +``` + +The following command indexes documents to any node in the cluster: + +```bash +$ ./bin/blast set 1 ' { "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "_type": "example" } } +' --grpc-address=:9000 | jq . ``` - -## Starting Blast in federated mode (experimental) - -![federation](https://user-images.githubusercontent.com/970948/59768498-6f0d0f80-92df-11e9-8538-2a1c6e44c30a.png) - -Running a Blast in cluster mode allows you to replicate the index among indexers in a cluster to improve fault tolerance. -However, as the index grows, performance degradation can become an issue. Therefore, instead of providing a large single physical index, it is better to distribute indices across multiple indexers. -Blast provides a federated mode to enable distributed search and indexing. - -Blast provides the following type of node for federation: -- manager: Manager manage common index mappings to index across multiple indexers. It also manages information and status of clusters that participate in the federation. -- dispatcher: Dispatcher is responsible for distributed search or indexing of each indexer. In the case of a index request, send document to each cluster based on the document ID. And in the case of a search request, the same query is sent to each cluster, then the search results are merged and returned to the client. 
- -### Bring up the manager cluster - -Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. - -```bash -$ ./bin/blast manager start \ - --grpc-address=:5100 \ - --grpc-gateway-address=:6100 \ - --http-address=:8100 \ - --node-id=manager1 \ - --node-address=:2100 \ - --data-dir=/tmp/blast/manager1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb - -$ ./bin/blast manager start \ - --peer-grpc-address=:5100 \ - --grpc-address=:5110 \ - --grpc-gateway-address=:6110 \ - --http-address=:8110 \ - --node-id=manager2 \ - --node-address=:2110 \ - --data-dir=/tmp/blast/manager2 \ - --raft-storage-type=boltdb - -$ ./bin/blast manager start \ - --peer-grpc-address=:5100 \ - --grpc-address=:5120 \ - --grpc-gateway-address=:6120 \ - --http-address=:8120 \ - --node-id=manager3 \ - --node-address=:2120 \ - --data-dir=/tmp/blast/manager3 \ - --raft-storage-type=boltdb -``` - -### Bring up the indexer cluster - -Federated mode differs from cluster mode that it specifies the manager in start up to bring up indexer cluster. -The following example starts two 3-node clusters. 
+So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5010 \ - --grpc-gateway-address=:6010 \ - --http-address=:8010 \ - --node-id=indexer2 \ - --node-address=:2010 \ - --data-dir=/tmp/blast/indexer2 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5020 \ - --grpc-gateway-address=:6020 \ - --http-address=:8020 \ - --node-id=indexer3 \ - --node-address=:2020 \ - --data-dir=/tmp/blast/indexer3 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5030 \ - --grpc-gateway-address=:6030 \ - --http-address=:8030 \ - --node-id=indexer4 \ - --node-address=:2030 \ - --data-dir=/tmp/blast/indexer4 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5040 \ - --grpc-gateway-address=:6040 \ - --http-address=:8040 \ - --node-id=indexer5 \ - --node-address=:2040 \ - --data-dir=/tmp/blast/indexer5 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5050 \ - --grpc-gateway-address=:6050 \ - --http-address=:8050 \ - --node-id=indexer6 \ - --node-address=:2050 \ - --data-dir=/tmp/blast/indexer6 \ - --raft-storage-type=boltdb +$ ./bin/blast get 1 --grpc-address=:9000 | jq . 
``` -### Start up the dispatcher - -Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. +You can see the result. The result of the above command is: -```bash -$ ./bin/blast dispatcher start \ - --manager-grpc-address=:5100 \ - --grpc-address=:5200 \ - --grpc-gateway-address=:6200 \ - --http-address=:8200 +```text +value1 ``` -### Check the cluster info - -```bash -$ ./bin/blast manager cluster info --grpc-address=:5100 | jq . -$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . -$ ./bin/blast indexer cluster info --grpc-address=:5030 | jq . -$ ./bin/blast manager get cluster --grpc-address=:5100 --format=json | jq . -``` +You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk | jq . +$ ./bin/blast get 1 --grpc-address=:9001 | jq . +$ ./bin/blast get 1 --grpc-address=:9002 | jq . ``` -```bash -$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json | jq . -``` +You can see the result. The result of the above command is: -```bash -$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt | jq . +```json +{ + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } +} ``` -## Blast on Docker +## Docker -### Building Docker container image on localhost +### Build Docker container image You can build the Docker container image like so: @@ -1140,7 +646,7 @@ You can build the Docker container image like so: $ make docker-build ``` -### Pulling Docker container image from docker.io +### Pull Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: @@ -1150,154 +656,101 @@ $ docker pull mosuka/blast:latest See https://hub.docker.com/r/mosuka/blast/tags/ -### Pulling Docker container image from docker.io - -You can also use the Docker container image already registered in docker.io like so: - -```bash -$ docker pull mosuka/blast:latest -``` - -### Running Indexer on Docker +### Start on Docker -Running a Blast data node on Docker. Start Blast data node like so: +Running a Blast data node on Docker. 
Start Blast node like so: ```bash -$ docker run --rm --name blast-indexer1 \ - -p 2000:2000 \ - -p 5000:5000 \ - -p 6000:6000 \ +$ docker run --rm --name blast-node1 \ + -p 7000:7000 \ -p 8000:8000 \ - -v $(pwd)/example:/opt/blast/example \ - mosuka/blast:latest blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ + -p 9000:9000 \ + -v $(pwd)/etc/blast_mapping.json:/etc/blast_mapping.json \ + mosuka/blast:latest start \ + --id=node1 \ + --raft-address=:7000 \ --http-address=:8000 \ - --node-id=blast-indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=/opt/blast/example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb + --grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=/etc/blast_mapping.json ``` You can execute the command in docker container as follows: ```bash -$ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 +$ docker exec -it blast-node1 blast node --grpc-address=:9000 ``` -### Running cluster on Docker compose +## Securing Blast -Also, running a Blast cluster on Docker compose. +Blast supports HTTPS access, ensuring that all communication between clients and a cluster is encrypted. -```bash -$ docker-compose up -d manager1 -$ docker-compose up -d indexer1 -$ docker-compose up -d indexer2 -$ docker-compose up -d indexer3 -$ docker-compose up -d indexer4 -$ docker-compose up -d indexer5 -$ docker-compose up -d indexer6 -$ docker-compose up -d dispatcher1 -$ docker-compose ps -$ ./bin/blast manager get --grpc-address=127.0.0.1:5110 /cluster | jq . -$ ./bin/blast dispatcher index --grpc-address=127.0.0.1:5210 --file=./example/wiki_bulk_index.jsonl --bulk | jq . -$ ./bin/blast dispatcher search --grpc-address=127.0.0.1:5210 --file=./example/wiki_search_request_simple.json | jq . 
-``` +### Generating a certificate and private key -```bash -$ docker-compose down -``` - - -## Wikipedia example - -This section explain how to index Wikipedia dump to Blast. - -### Install wikiextractor +One way to generate the necessary resources is via [openssl](https://www.openssl.org/). For example: ```bash -$ cd ${HOME} -$ git clone git@github.com:attardi/wikiextractor.git +$ openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' +Generating a 4096 bit RSA private key +............................++ +........++ +writing new private key to 'key.pem' ``` -### Download wikipedia dump +### Secure cluster example -```bash -$ curl -o ~/tmp/enwiki-20190101-pages-articles.xml.bz2 https://dumps.wikimedia.org/enwiki/20190101/enwiki-20190101-pages-articles.xml.bz2 -``` - -### Parsing wikipedia dump +Starting a node with HTTPS enabled, node-to-node encryption, and with the above configuration file. It is assumed the HTTPS X.509 certificate and key are at the paths server.crt and key.pem respectively. 
```bash -$ cd wikiextractor -$ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles.xml.bz2 +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + --grpc-address=:9000 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` -### Starting Indexer - ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/enwiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` -### Indexing wikipedia dump - ```bash -$ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) - do - echo "Indexing ${FILE}" - TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") - DOCS=$(cat ${FILE} | jq -r '. 
+ {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -c) - curl -s -X PUT -H 'Content-Type: application/x-ndjson' "http://127.0.0.1:6000/v1/bulk" --data-binary "${DOCS}" - echo "" - done +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` - -## Spatial/Geospatial search example - -This section explain how to index Spatial/Geospatial data to Blast. - -### Starting Indexer with Spatial/Geospatial index mapping - -```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/geo_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb -``` - -### Indexing example Spatial/Geospatial data +You can access the cluster by adding a flag, such as the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_1.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_2.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_3.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_4.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_5.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_6.json +$ ./bin/blast cluster --grpc-address=:9000 --certificate-file=./etc/blast_cert.pem --common-name=localhost | jq . 
``` -### Searching example Spatial/Geospatial data +or ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/geo_search_request.json +$ curl -X GET https://localhost:8000/v1/cluster --cacert ./etc/cert.pem | jq . ``` diff --git a/builtin/config_bleve.go b/builtin/config_bleve.go new file mode 100644 index 0000000..d95e507 --- /dev/null +++ b/builtin/config_bleve.go @@ -0,0 +1,5 @@ +package builtin + +import ( + _ "github.com/blevesearch/bleve/config" +) diff --git a/builtins/config_badger.go b/builtins/config_badger.go deleted file mode 100644 index b920c65..0000000 --- a/builtins/config_badger.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build badger full - -package builtins - -import ( - _ "github.com/mosuka/bbadger" -) diff --git a/builtins/config_bleve.go b/builtins/config_bleve.go deleted file mode 100644 index 031bf9a..0000000 --- a/builtins/config_bleve.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtins - -import ( - _ "github.com/blevesearch/bleve/config" -) diff --git a/client/grpc_client.go b/client/grpc_client.go new file mode 100644 index 0000000..c00fd97 --- /dev/null +++ b/client/grpc_client.go @@ -0,0 +1,218 @@ +package client + +import ( + "context" + "log" + "math" + "time" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +type GRPCClient struct { + ctx context.Context + cancel context.CancelFunc + conn *grpc.ClientConn + client protobuf.IndexClient + + logger *log.Logger +} + +func NewGRPCClient(grpc_address string) (*GRPCClient, error) { + return NewGRPCClientWithContext(grpc_address, context.Background()) +} + +func NewGRPCClientWithContext(grpc_address string, baseCtx context.Context) (*GRPCClient, error) { + return NewGRPCClientWithContextTLS(grpc_address, baseCtx, "", "") +} + +func NewGRPCClientWithContextTLS(grpcAddress string, baseCtx context.Context, certificateFile string, commonName string) (*GRPCClient, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + ctx, cancel := context.WithCancel(baseCtx) + + if 
certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + conn, err := grpc.DialContext(ctx, grpcAddress, dialOpts...) + if err != nil { + cancel() + return nil, err + } + + return &GRPCClient{ + ctx: ctx, + cancel: cancel, + conn: conn, + client: protobuf.NewIndexClient(conn), + }, nil +} + +func (c *GRPCClient) Close() error { + c.cancel() + if c.conn != nil { + return c.conn.Close() + } + + return c.ctx.Err() +} + +func (c *GRPCClient) Target() string { + return c.conn.Target() +} + +func (c *GRPCClient) LivenessCheck(opts ...grpc.CallOption) (*protobuf.LivenessCheckResponse, error) { + if resp, err := c.client.LivenessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) ReadinessCheck(opts ...grpc.CallOption) (*protobuf.ReadinessCheckResponse, error) { + if resp, err := c.client.ReadinessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Join(req *protobuf.JoinRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Join(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Leave(req *protobuf.LeaveRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Leave(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Node(opts ...grpc.CallOption) (*protobuf.NodeResponse, error) { + if resp, err := c.client.Node(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Cluster(opts ...grpc.CallOption) (*protobuf.ClusterResponse, error) { + if resp, err := c.client.Cluster(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, 
err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { + if _, err := c.client.Snapshot(c.ctx, &empty.Empty{}); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Get(req *protobuf.GetRequest, opts ...grpc.CallOption) (*protobuf.GetResponse, error) { + if resp, err := c.client.Get(c.ctx, req, opts...); err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + return nil, errors.ErrNotFound + default: + return nil, err + } + } else { + return resp, nil + } +} + +func (c *GRPCClient) Search(req *protobuf.SearchRequest, opts ...grpc.CallOption) (*protobuf.SearchResponse, error) { + if resp, err := c.client.Search(c.ctx, req, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Set(req *protobuf.SetRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Set(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Delete(req *protobuf.DeleteRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Delete(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) BulkIndex(req *protobuf.BulkIndexRequest, opts ...grpc.CallOption) (*protobuf.BulkIndexResponse, error) { + if resp, err := c.client.BulkIndex(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) BulkDelete(req *protobuf.BulkDeleteRequest, opts ...grpc.CallOption) (*protobuf.BulkDeleteResponse, error) { + if resp, err := c.client.BulkDelete(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) Mapping(opts ...grpc.CallOption) (*protobuf.MappingResponse, error) { + if resp, err := c.client.Mapping(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Watch(req *empty.Empty, opts 
...grpc.CallOption) (protobuf.Index_WatchClient, error) { + return c.client.Watch(c.ctx, req, opts...) +} + +func (c *GRPCClient) Metrics(opts ...grpc.CallOption) (*protobuf.MetricsResponse, error) { + if resp, err := c.client.Metrics(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} diff --git a/cmd/blast/dispatcher_delete.go b/cmd/blast/dispatcher_delete.go deleted file mode 100644 index 255e350..0000000 --- a/cmd/blast/dispatcher_delete.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - id := c.Args().Get(0) - - // create client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := dispatcher.JsonMarshaler{} - - if id != "" { - req := &distribute.DeleteRequest{ - Id: id, - } - resp, err := client.Delete(req) - if err != nil { - return err - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } else { - if filePath != "" { - ids := make([]string, 0) - - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - reader := bufio.NewReader(file) - for { - docIdBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - break - } - - return err - } - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - } - - req := &distribute.BulkDeleteRequest{ - Ids: ids, - } - - resp, err := client.BulkDelete(req) - if err != nil { - return err - } - - resultBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/dispatcher_get.go 
b/cmd/blast/dispatcher_get.go deleted file mode 100644 index cc01500..0000000 --- a/cmd/blast/dispatcher_get.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - id := c.Args().Get(0) - if id == "" { - err := errors.New("arguments are not correct") - return err - } - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &distribute.GetRequest{ - Id: id, - } - - res, err := client.Get(req) - if err != nil { - return err - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_index.go b/cmd/blast/dispatcher_index.go deleted file mode 100644 index 59dd811..0000000 --- a/cmd/blast/dispatcher_index.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func dispatcherIndex(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - bulk := c.Bool("bulk") - - // create gRPC client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := dispatcher.JsonMarshaler{} - - if c.NArg() >= 2 { - // index document by specifying ID and fields via standard input - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: id, - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else if c.NArg() == 1 { - // index document by specifying document(s) via standard input - docSrc := c.Args().Get(0) - - if 
bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &distribute.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - // index document by specifying document(s) via file - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - if bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(file) - for { - docBytes, err := reader.ReadBytes('\n') - if 
err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &distribute.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - docBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - var docMap map[string]interface{} - err = json.Unmarshal(docBytes, &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go deleted file mode 100644 index 6594ffe..0000000 --- a/cmd/blast/dispatcher_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherNodeHealth(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *distribute.NodeHealthCheckResponse - if healthiness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} - res, err = 
client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_search.go b/cmd/blast/dispatcher_search.go deleted file mode 100644 index bf6ccda..0000000 --- a/cmd/blast/dispatcher_search.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - - searchRequest := bleve.NewSearchRequest(nil) - - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // open file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - // read file - fileBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - - // create search request - if fileBytes != nil { - var tmpValue map[string]interface{} - err = json.Unmarshal(fileBytes, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - } - } - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - searchRequestAny := &any.Any{} - err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return err - } - - req := &distribute.SearchRequest{SearchRequest: searchRequestAny} - - res, err := client.Search(req) - if err != nil { - return err - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", 
string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_start.go b/cmd/blast/dispatcher_start.go deleted file mode 100644 index 4b61df3..0000000 --- a/cmd/blast/dispatcher_start.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/logutils" - "github.com/urfave/cli" -) - -func dispatcherStart(c *cli.Context) error { - managerAddr := c.String("manager-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpLogFilename := c.GlobalString("http-log-file") - httpLogMaxSize := c.GlobalInt("http-log-max-size") - httpLogMaxBackups := c.GlobalInt("http-log-max-backups") - 
httpLogMaxAge := c.GlobalInt("http-log-max-age") - httpLogCompress := c.GlobalBool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpLogFilename, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - svr, err := dispatcher.NewServer(managerAddr, grpcAddr, grpcGatewayAddr, httpAddr, logger, grpcLogger, httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go deleted file mode 100644 index 7963655..0000000 --- a/cmd/blast/indexer_cluster_info.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerClusterInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - resp, err := client.ClusterInfo(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go deleted file mode 100644 index 0793229..0000000 --- a/cmd/blast/indexer_cluster_leave.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerClusterLeave(c *cli.Context) error { - clusterGrpcAddr := c.String("manager-grpc-address") - shardId := c.String("shard-id") - peerGrpcAddr := c.String("peer-grpc-address") - - if clusterGrpcAddr != "" && shardId != "" { - // get grpc address of leader node - } else if peerGrpcAddr != "" { - // get grpc address of leader node - } - - nodeId := c.String("node-id") - - client, err := indexer.NewGRPCClient(peerGrpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &index.ClusterLeaveRequest{ - Id: nodeId, - } - - resp, err := client.ClusterLeave(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go deleted file mode 100644 index a991b34..0000000 --- a/cmd/blast/indexer_cluster_watch.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerClusterWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - req := &empty.Empty{} - clusterInfo, err := client.ClusterInfo(req) - if err != nil { - return err - } - resp := &index.ClusterWatchResponse{ - Event: 0, - Node: nil, - Cluster: clusterInfo.Cluster, - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - clusterWatchClient, err := client.ClusterWatch(req) - if err != nil { - return err - } - - for { - resp, err := clusterWatchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - respBytes, err = marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } - - return nil -} diff --git a/cmd/blast/indexer_delete.go b/cmd/blast/indexer_delete.go deleted file mode 100644 index b8aa834..0000000 --- a/cmd/blast/indexer_delete.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - id := c.Args().Get(0) - - // create client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - if id != "" { - req := &index.DeleteRequest{ - Id: id, - } - resp, err := client.Delete(req) - if err != nil { - return err - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } else { - if filePath != "" { - ids := make([]string, 0) - - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - reader := bufio.NewReader(file) - for { - docIdBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - break - } - - return err - } - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - } - - req := &index.BulkDeleteRequest{ - Ids: ids, - } - - resp, err := client.BulkDelete(req) - if err != nil { - return err - } - - resultBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - } else { - return errors.New("argument error") - } - 
} - - return nil -} diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go deleted file mode 100644 index 976e4be..0000000 --- a/cmd/blast/indexer_get.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - id := c.Args().Get(0) - if id == "" { - err := errors.New("arguments are not correct") - return err - } - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &index.GetRequest{ - Id: id, - } - - resp, err := client.Get(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go deleted file mode 100644 index 7f5521c..0000000 --- a/cmd/blast/indexer_index.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerIndex(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - bulk := c.Bool("bulk") - - // create gRPC client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - if c.NArg() >= 2 { - // index document by specifying ID and fields via standard input - id := c.Args().Get(0) - docSrc := c.Args().Get(1) - - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"], fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: id, - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else if c.NArg() == 1 { - // index document by specifying document(s) via standard input - docSrc := c.Args().Get(0) - - if bulk { - // jsonl - docs := 
make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &index.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - // index document by specifying document(s) via file - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - if bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(file) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err 
== io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &index.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - docBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - var docMap map[string]interface{} - err = json.Unmarshal(docBytes, &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go deleted file mode 100644 index e818992..0000000 --- a/cmd/blast/indexer_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerNodeHealth(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *index.NodeHealthCheckResponse - if healthiness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: 
index.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go deleted file mode 100644 index 610403f..0000000 --- a/cmd/blast/indexer_node_info.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerNodeInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - res, err := client.NodeInfo(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - - nodeBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) - - return nil -} diff --git a/cmd/blast/indexer_search.go b/cmd/blast/indexer_search.go deleted file mode 100644 index 2a7d4b0..0000000 --- a/cmd/blast/indexer_search.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - - searchRequest := bleve.NewSearchRequest(nil) - - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // open file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - // read file - fileBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - - // create search request - if fileBytes != nil { - var tmpValue map[string]interface{} - err = json.Unmarshal(fileBytes, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("search_request does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - } - } - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - searchRequestAny := &any.Any{} - err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return err - } - - req := &index.SearchRequest{SearchRequest: searchRequestAny} - - res, err := client.Search(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", 
string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_snapshot.go b/cmd/blast/indexer_snapshot.go deleted file mode 100644 index bad2cf5..0000000 --- a/cmd/blast/indexer_snapshot.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - res, err := client.Snapshot(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go deleted file mode 100644 index d01b076..0000000 --- a/cmd/blast/indexer_start.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerStart(c *cli.Context) error { - managerGRPCAddr := c.String("manager-grpc-address") - shardId := c.String("shard-id") - peerGRPCAddr := c.String("peer-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - nodeId := c.String("node-id") - nodeAddr := c.String("node-address") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - logLevel := c.String("log-level") - logFilename := c.String("log-file") - logMaxSize := c.Int("log-max-size") - logMaxBackups := c.Int("log-max-backups") - logMaxAge := c.Int("log-max-age") - logCompress := c.Bool("log-compress") - - grpcLogLevel := c.String("grpc-log-level") - grpcLogFilename := c.String("grpc-log-file") - grpcLogMaxSize := c.Int("grpc-log-max-size") - grpcLogMaxBackups := c.Int("grpc-log-max-backups") - grpcLogMaxAge := c.Int("grpc-log-max-age") - grpcLogCompress := c.Bool("grpc-log-compress") - - httpLogFile := c.String("http-log-file") - httpLogMaxSize := c.Int("http-log-max-size") - httpLogMaxBackups := c.Int("http-log-max-backups") 
- httpLogMaxAge := c.Int("http-log-max-age") - httpLogCompress := c.Bool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpLogFile, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - node := &index.Node{ - Id: nodeId, - BindAddress: nodeAddr, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddr, - GrpcGatewayAddress: grpcGatewayAddr, - HttpAddress: httpAddr, - }, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/main.go b/cmd/blast/main.go deleted file mode 100644 index 7183f17..0000000 --- a/cmd/blast/main.go +++ /dev/null @@ -1,1010 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - "path" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/version" - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Usage = "Command for blast" - app.Version = version.Version - app.Authors = []cli.Author{ - { - Name: "mosuka", - Email: "minoru.osuka@gmail.com", - }, - } - - app.Commands = []cli.Command{ - { - Name: "manager", - Usage: "Command for blast manager", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast manager", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_MANAGER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - EnvVar: "BLAST_MANAGER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6100", - EnvVar: "BLAST_MANAGER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8100", - EnvVar: "BLAST_MANAGER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_MANAGER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2100", - EnvVar: "BLAST_MANAGER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster communications", - }, - cli.StringFlag{ - Name: 
"data-dir", - Value: "/tmp/blast/manager", - EnvVar: "BLAST_MANAGER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: "BLAST_MANAGER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_MANAGER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_MANAGER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_MANAGER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_MANAGER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_MANAGER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_MANAGER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a 
log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_MANAGER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_MANAGER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: managerStart, - }, - { - Name: "node", - Usage: "Command for blast manager node", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get node information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerNodeInfo, - }, - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness 
probe", - }, - }, - Action: managerNodeHealthCheck, - }, - }, - }, - { - Name: "cluster", - Usage: "Command for blast manager cluster", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get cluster information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerClusterInfo, - }, - { - Name: "watch", - Usage: "Watch peers", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerClusterWatch, - }, - { - Name: "leave", - Usage: "Leave the manager from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "The gRPC listen address", - }, - }, - Action: managerClusterLeave, - }, - }, - }, - { - Name: "get", - Usage: "Get data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: "[key]", - Action: managerGet, - }, - { - Name: "set", - Usage: "Set data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Value file", - }, - }, - ArgsUsage: "[key] [value]", - Action: managerSet, - }, - { - Name: "delete", - Usage: "Delete data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: "[key]", - Action: managerDelete, - }, - { - Name: "watch", - Usage: "Watch data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: 
"[key]", - Action: managerWatch, - }, - { - Name: "snapshot", - Usage: "Snapshot the data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - Action: managerSnapshot, - }, - }, - }, - { - Name: "indexer", - Usage: "Command for blast indexer", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast indexer", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_MANAGER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster manager to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - EnvVar: "BLAST_INDEXER_SHARD_ID", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6000", - EnvVar: "BLAST_INDEXER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8000", - EnvVar: "BLAST_INDEXER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_INDEXER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2000", - EnvVar: "BLAST_INDEXER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster communications", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast/indexer", - EnvVar: "BLAST_INDEXER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: 
"BLAST_INDEXER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_INDEXER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_INDEXER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_INDEXER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_INDEXER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_INDEXER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_INDEXER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: 
"grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_INDEXER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_INDEXER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: indexerStart, - }, - { - Name: "node", - Usage: "Command for blast indexer node", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get node information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerNodeInfo, - }, - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness probe", - }, - }, - Action: indexerNodeHealth, - }, - }, - }, - { - Name: "cluster", - Usage: "Command for blast indexer cluster", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get cluster 
information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerClusterInfo, - }, - { - Name: "watch", - Usage: "Watch cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerClusterWatch, - }, - { - Name: "leave", - Usage: "Leave the indexer from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: "", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "Node ID to delete", - }, - }, - Action: indexerClusterLeave, - }, - }, - }, - { - Name: "get", - Usage: "Get document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document ID]", - Action: indexerGet, - }, - { - Name: "index", - Usage: "Index document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document list", - }, - cli.BoolFlag{ - Name: "bulk", - Usage: "Bulk indexing", - }, - }, - ArgsUsage: "[document ID] [document fields]", - Action: indexerIndex, - }, - { - Name: "delete", - Usage: "Delete document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", 
- }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document ID]", - Action: indexerDelete, - }, - { - Name: "search", - Usage: "Search document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Search request", - }, - }, - ArgsUsage: "[search request]", - Action: indexerSearch, - }, - { - Name: "snapshot", - Usage: "Snapshot", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - }, - Action: indexerSnapshot, - }, - }, - }, - { - Name: "dispatcher", - Usage: "Command for blast dispatcher", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast dispatcher", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: ":5100", - EnvVar: "BLAST_DISPATCHER_CLUSTER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - EnvVar: "BLAST_DISPATCHER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6200", - EnvVar: "BLAST_DISPATCHER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8200", - EnvVar: "BLAST_DISPATCHER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_DISPATCHER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: 
"BLAST_DISPATCHER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_DISPATCHER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access 
log", - }, - }, - Action: dispatcherStart, - }, - { - Name: "node", - Usage: "Command for blast dispatcher node", - Subcommands: []cli.Command{ - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness probe", - }, - }, - Action: dispatcherNodeHealth, - }, - }, - }, - { - Name: "get", - Usage: "Get document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document IDs]", - Action: dispatcherGet, - }, - { - Name: "index", - Usage: "Index document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document list", - }, - cli.BoolFlag{ - Name: "bulk", - Usage: "Bulk indexing", - }, - }, - ArgsUsage: "[document ID] [document fields]", - Action: dispatcherIndex, - }, - { - Name: "delete", - Usage: "Delete document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document IDs]", - Action: dispatcherDelete, - }, - { - Name: "search", - Usage: "Search document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Search request", - }, - }, - ArgsUsage: "[search request]", - Action: dispatcherSearch, - }, - }, - }, - } - - cli.HelpFlag = 
cli.BoolFlag{ - Name: "help, h", - Usage: "Show this message", - } - cli.VersionFlag = cli.BoolFlag{ - Name: "version, v", - Usage: "Print the version", - } - - err := app.Run(os.Args) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } -} diff --git a/cmd/blast/manager_cluster_info.go b/cmd/blast/manager_cluster_info.go deleted file mode 100644 index 8b0a25a..0000000 --- a/cmd/blast/manager_cluster_info.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerClusterInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go deleted file mode 100644 index 12ae8e1..0000000 --- a/cmd/blast/manager_cluster_leave.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerClusterLeave(c *cli.Context) error { - peerGrpcAddr := c.String("peer-grpc-address") - - if peerGrpcAddr != "" { - // get grpc address of leader node - } - - nodeId := c.String("node-id") - - client, err := manager.NewGRPCClient(peerGrpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.ClusterLeaveRequest{ - Id: nodeId, - } - res, err := client.ClusterLeave(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go deleted file mode 100644 index 320965c..0000000 --- a/cmd/blast/manager_cluster_watch.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerClusterWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := manager.JsonMarshaler{} - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - return err - } - resp := &management.ClusterWatchResponse{ - Event: 0, - Node: nil, - Cluster: res.Cluster, - } - resBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - watchClient, err := client.ClusterWatch(req) - if err != nil { - return err - } - - for { - resp, err = watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - resBytes, err = marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - - return nil -} diff --git a/cmd/blast/manager_delete.go b/cmd/blast/manager_delete.go deleted file mode 100644 index 0caf391..0000000 --- a/cmd/blast/manager_delete.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.DeleteRequest{ - Key: key, - } - res, err := client.Delete(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_get.go b/cmd/blast/manager_get.go deleted file mode 100644 index 6b41f0e..0000000 --- a/cmd/blast/manager_get.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.GetRequest{ - Key: key, - } - - res, err := client.Get(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_node_health.go b/cmd/blast/manager_node_health.go deleted file mode 100644 index e2eb209..0000000 --- a/cmd/blast/manager_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerNodeHealthCheck(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *management.NodeHealthCheckResponse - if healthiness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go deleted 
file mode 100644 index ca190e1..0000000 --- a/cmd/blast/manager_node_info.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerNodeInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.NodeInfo(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_set.go b/cmd/blast/manager_set.go deleted file mode 100644 index f7bdac8..0000000 --- a/cmd/blast/manager_set.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerSet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - valueStr := c.Args().Get(1) - if valueStr == "" { - err := errors.New("value argument must be set") - return err - } - - var value interface{} - err := json.Unmarshal([]byte(valueStr), &value) - if err != nil { - switch err.(type) { - case *json.SyntaxError: - value = valueStr - default: - return err - } - } - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &management.SetRequest{ - Key: key, - Value: valueAny, - } - - res, err := client.Set(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_snapshot.go b/cmd/blast/manager_snapshot.go deleted file mode 100644 index f252e34..0000000 --- 
a/cmd/blast/manager_snapshot.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.Snapshot(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go deleted file mode 100644 index 94a7445..0000000 --- a/cmd/blast/manager_start.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerStart(c *cli.Context) error { - peerGrpcAddr := c.String("peer-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - nodeId := c.String("node-id") - nodeAddr := c.String("node-address") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - logLevel := c.String("log-level") - logFilename := c.String("log-file") - logMaxSize := c.Int("log-max-size") - logMaxBackups := c.Int("log-max-backups") - logMaxAge := c.Int("log-max-age") - logCompress := c.Bool("log-compress") - - grpcLogLevel := c.String("grpc-log-level") - grpcLogFilename := c.String("grpc-log-file") - grpcLogMaxSize := c.Int("grpc-log-max-size") - grpcLogMaxBackups := c.Int("grpc-log-max-backups") - grpcLogMaxAge := c.Int("grpc-log-max-age") - grpcLogCompress := c.Bool("grpc-log-compress") - - httpLogFilename := c.String("http-log-file") - httpLogMaxSize := c.Int("http-log-max-size") - httpLogMaxBackups := c.Int("http-log-max-backups") - httpLogMaxAge := c.Int("http-log-max-age") - httpLogCompress := 
c.Bool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpLogger := logutils.NewApacheCombinedLogger( - httpLogFilename, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - node := &management.Node{ - Id: nodeId, - BindAddress: nodeAddr, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddr, - GrpcGatewayAddress: grpcGatewayAddr, - HttpAddress: httpAddr, - }, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go deleted file mode 100644 index ff010df..0000000 --- a/cmd/blast/manager_watch.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.WatchRequest{ - Key: key, - } - watchClient, err := client.Watch(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - - for { - resp, err := watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - respBytes, err := marshaler.Marshal(resp) - if err != nil { - log.Println(err.Error()) - break - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } - - return nil -} diff --git a/cmd/bulk_delete.go b/cmd/bulk_delete.go new file mode 100644 index 0000000..603eeb9 --- /dev/null +++ b/cmd/bulk_delete.go @@ -0,0 +1,129 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "strings" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkDeleteCmd = &cobra.Command{ + Use: "bulk-delete", + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + 
grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + req := &protobuf.BulkDeleteRequest{ + Requests: make([]*protobuf.DeleteRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + count, err := c.BulkDelete(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkDeleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkDeleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + bulkDeleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkDeleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkDeleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkDeleteCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkDeleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkDeleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkDeleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/bulk_index.go b/cmd/bulk_index.go new file mode 100644 index 0000000..56293b0 --- /dev/null +++ b/cmd/bulk_index.go @@ -0,0 +1,135 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkIndexCmd = &cobra.Command{ + Use: "bulk-index", + Short: "Index documents in bulk", + Long: "Index documents in bulk", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + m := marshaler.BlastMarshaler{} + + req := &protobuf.BulkIndexRequest{ + Requests: make([]*protobuf.SetRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { 
+ docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + count, err := c.BulkIndex(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkIndexCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkIndexCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + bulkIndexCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkIndexCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkIndexCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkIndexCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkIndexCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkIndexCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkIndexCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/cluster.go b/cmd/cluster.go new file mode 100644 index 0000000..ef78f42 --- /dev/null +++ b/cmd/cluster.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + clusterCmd = &cobra.Command{ + Use: "cluster", + Short: "Get the cluster info", + Long: "Get the cluster info", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Cluster() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(clusterCmd) + + cobra.OnInitialize(func() { + if 
configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + clusterCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + clusterCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + clusterCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + clusterCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", clusterCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", clusterCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", clusterCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/delete.go b/cmd/delete.go new file mode 100644 index 0000000..ea21b04 --- /dev/null +++ b/cmd/delete.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + deleteCmd = &cobra.Command{ + Use: "delete ID", + Args: cobra.ExactArgs(1), + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = 
viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.DeleteRequest{ + Id: id, + } + + if err := c.Delete(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(deleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + deleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + deleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + deleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + deleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", deleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", deleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", deleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/get.go b/cmd/get.go new file mode 100644 index 0000000..99a62c0 --- /dev/null +++ b/cmd/get.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + getCmd = &cobra.Command{ + Use: "get ID", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.GetRequest{ + Id: id, + } + + resp, err := c.Get(req) + if err != nil { + return err + } + + m := marshaler.BlastMarshaler{} + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(getCmd) + + cobra.OnInitialize(func() { + if 
configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + getCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + getCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + getCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + getCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", getCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", getCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", getCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/healthcheck.go b/cmd/healthcheck.go new file mode 100644 index 0000000..ffe28a6 --- /dev/null +++ b/cmd/healthcheck.go @@ -0,0 +1,100 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + healthCheckCmd = &cobra.Command{ + Use: "healthcheck", + Short: "Health check a node", + Long: "Health check a node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = 
viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + lResp, err := c.LivenessCheck() + if err != nil { + return err + } + + rResp, err := c.ReadinessCheck() + if err != nil { + return err + } + + resp := map[string]bool{ + "liveness": lResp.Alive, + "readiness:": rResp.Ready, + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(healthCheckCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + healthCheckCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + healthCheckCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + healthCheckCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + healthCheckCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", healthCheckCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", healthCheckCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", healthCheckCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/join.go b/cmd/join.go new file mode 100644 index 0000000..81bd84d --- /dev/null +++ b/cmd/join.go @@ -0,0 +1,104 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + joinCmd = &cobra.Command{ + Use: "join ID GRPC_ADDRESS", + Args: cobra.ExactArgs(2), + Short: "Join a node to the cluster", + Long: "Join a node to the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + targetGrpcAddress := args[1] + + t, err := client.NewGRPCClientWithContextTLS(targetGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = t.Close() + }() + + nodeResp, err := t.Node() + if err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.JoinRequest{ + Id: id, 
+ Node: nodeResp.Node, + } + + if err := c.Join(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(joinCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + joinCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + joinCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + joinCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + joinCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", joinCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", joinCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", joinCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/leave.go b/cmd/leave.go new file mode 100644 index 0000000..42d8ffa --- /dev/null +++ b/cmd/leave.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + leaveCmd = &cobra.Command{ + Use: "leave ID", + Args: cobra.ExactArgs(1), + Short: "Leave a node from the cluster", + 
Long: "Leave a node from the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.LeaveRequest{ + Id: id, + } + + if err := c.Leave(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(leaveCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in config search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + leaveCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + leaveCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + leaveCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + leaveCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", leaveCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", leaveCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", leaveCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/mapping.go b/cmd/mapping.go new file mode 100644 index 0000000..bbf116d --- /dev/null +++ b/cmd/mapping.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + mappingCmd = &cobra.Command{ + Use: "mapping", + Short: "Get the index mapping", + Long: "Get the index mapping", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Mapping() + if err != nil { + return err + } + + fmt.Println(string(resp.Mapping)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(mappingCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + 
viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + mappingCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + mappingCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + mappingCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + mappingCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", mappingCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", mappingCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", mappingCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/metrics.go b/cmd/metrics.go new file mode 100644 index 0000000..425d564 --- /dev/null +++ b/cmd/metrics.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + metricsCmd = &cobra.Command{ + Use: "metrics", + Short: "Get the node metrics", + Long: "Get the node metrics in Prometheus exposition format", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err 
:= c.Metrics() + if err != nil { + return err + } + + fmt.Println(string(resp.Metrics)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(metricsCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + metricsCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + metricsCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + metricsCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + metricsCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", metricsCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", metricsCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", metricsCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000..572c512 --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + nodeCmd = &cobra.Command{ + Use: "node", + Short: "Get the node info", + Long: "Get the node info", + RunE: func(cmd 
*cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Node() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(nodeCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + nodeCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + nodeCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + nodeCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + nodeCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", nodeCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", nodeCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", nodeCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..f2c7120 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +var ( + rootCmd = &cobra.Command{ + Use: "blast", + Short: "The lightweight distributed search server", + Long: "The lightweight distributed search server", + } +) + +func Execute() error { + return rootCmd.Execute() +} diff --git a/cmd/search.go b/cmd/search.go new file mode 100644 index 0000000..e62b15b --- /dev/null +++ b/cmd/search.go @@ -0,0 +1,101 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + searchCmd = &cobra.Command{ + Use: "search REQUEST", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + searchRequest := args[0] + + m := marshaler.BlastMarshaler{} + + req := &protobuf.SearchRequest{} + if err := m.Unmarshal([]byte(searchRequest), req); 
err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Search(req) + if err != nil { + return err + } + + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(searchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + searchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + searchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + searchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + searchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", searchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", searchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", searchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/set.go b/cmd/set.go new file mode 100644 index 0000000..b765bd7 --- /dev/null +++ b/cmd/set.go @@ -0,0 +1,94 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + setCmd = &cobra.Command{ + Use: "set ID FIELDS", + Args: cobra.ExactArgs(2), + Short: "Set a document", + Long: "Set a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + fields := args[1] + + req := &protobuf.SetRequest{} + m := marshaler.BlastMarshaler{} + if err := m.Unmarshal([]byte(fields), req); err != nil { + return err + } + req.Id = id + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Set(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(setCmd) + + cobra.OnInitialize(func() { + if configFile != "" 
{ + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + setCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + setCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + setCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + setCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", setCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", setCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", setCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/snapshot.go b/cmd/snapshot.go new file mode 100644 index 0000000..2e76298 --- /dev/null +++ b/cmd/snapshot.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + snapshotCmd = &cobra.Command{ + Use: "snapshot", + Short: "Create a snapshot", + Long: "Create a snapshot which is full-volume copy of data stored on the node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") 
+ + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Snapshot(); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(snapshotCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + snapshotCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + snapshotCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + snapshotCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + snapshotCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", snapshotCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", snapshotCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", snapshotCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/start.go b/cmd/start.go new file mode 100644 index 0000000..8f4e6ca --- /dev/null +++ b/cmd/start.go @@ -0,0 +1,211 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + homedir "github.com/mitchellh/go-homedir" + 
"github.com/mosuka/blast/client" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/server" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + startCmd = &cobra.Command{ + Use: "start", + Short: "Start the index server", + Long: "Start the index server", + RunE: func(cmd *cobra.Command, args []string) error { + id = viper.GetString("id") + raftAddress = viper.GetString("raft_address") + grpcAddress = viper.GetString("grpc_address") + httpAddress = viper.GetString("http_address") + dataDirectory = viper.GetString("data_directory") + peerGrpcAddress = viper.GetString("peer_grpc_address") + + mappingFile = viper.GetString("mapping_file") + + certificateFile = viper.GetString("certificate_file") + keyFile = viper.GetString("key_file") + commonName = viper.GetString("common_name") + + logLevel = viper.GetString("log_level") + logFile = viper.GetString("log_file") + logMaxSize = viper.GetInt("log_max_size") + logMaxBackups = viper.GetInt("log_max_backups") + logMaxAge = viper.GetInt("log_max_age") + logCompress = viper.GetBool("log_compress") + + logger := log.NewLogger( + logLevel, + logFile, + logMaxSize, + logMaxBackups, + logMaxAge, + logCompress, + ) + + bootstrap := peerGrpcAddress == "" || peerGrpcAddress == grpcAddress + + indexMapping := mapping.NewIndexMapping() + if mappingFile != "" { + var err error + if indexMapping, err = mapping.NewIndexMappingFromFile(mappingFile); err != nil { + return err + } + } + + raftServer, err := server.NewRaftServer(id, raftAddress, dataDirectory, indexMapping, bootstrap, logger) + if err != nil { + return err + } + + grpcServer, err := server.NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) + if err != nil { + return err + } + + grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, logger) + if err != nil { + return err + } + + quitCh := 
make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + if err := raftServer.Start(); err != nil { + return err + } + + if err := grpcServer.Start(); err != nil { + return err + } + + if err := grpcGateway.Start(); err != nil { + return err + } + + // wait for detect leader if it's bootstrap + if bootstrap { + timeout := 60 * time.Second + if err := raftServer.WaitForDetectLeader(timeout); err != nil { + return err + } + } + + // create gRPC client for joining node + var joinGrpcAddress string + if bootstrap { + joinGrpcAddress = grpcAddress + } else { + joinGrpcAddress = peerGrpcAddress + } + + c, err := client.NewGRPCClientWithContextTLS(joinGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + // join this node to the existing cluster + joinRequest := &protobuf.JoinRequest{ + Id: id, + Node: &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, + } + if err = c.Join(joinRequest); err != nil { + return err + } + + // wait for receiving signal + <-quitCh + + _ = grpcGateway.Stop() + _ = grpcServer.Stop() + _ = raftServer.Stop() + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(startCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + 
startCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + startCmd.PersistentFlags().StringVar(&id, "id", "node1", "node ID") + startCmd.PersistentFlags().StringVar(&raftAddress, "raft-address", ":7000", "Raft server listen address") + startCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + startCmd.PersistentFlags().StringVar(&httpAddress, "http-address", ":8000", "HTTP server listen address") + startCmd.PersistentFlags().StringVar(&dataDirectory, "data-directory", "/tmp/blast/data", "data directory which store the index and Raft logs") + startCmd.PersistentFlags().StringVar(&peerGrpcAddress, "peer-grpc-address", "", "listen address of the existing gRPC server in the joining cluster") + startCmd.PersistentFlags().StringVar(&mappingFile, "mapping-file", "", "path to the index mapping file") + startCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + startCmd.PersistentFlags().StringVar(&keyFile, "key-file", "", "path to the client server TLS key file") + startCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + startCmd.PersistentFlags().StringVar(&logLevel, "log-level", "INFO", "log level") + startCmd.PersistentFlags().StringVar(&logFile, "log-file", os.Stderr.Name(), "log file") + startCmd.PersistentFlags().IntVar(&logMaxSize, "log-max-size", 500, "max size of a log file in megabytes") + startCmd.PersistentFlags().IntVar(&logMaxBackups, "log-max-backups", 3, "max backup count of log files") + startCmd.PersistentFlags().IntVar(&logMaxAge, "log-max-age", 30, "max age of a log file in days") + startCmd.PersistentFlags().BoolVar(&logCompress, "log-compress", false, "compress a log file") + + _ = viper.BindPFlag("id", startCmd.PersistentFlags().Lookup("id")) + _ = viper.BindPFlag("raft_address", 
startCmd.PersistentFlags().Lookup("raft-address")) + _ = viper.BindPFlag("grpc_address", startCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("http_address", startCmd.PersistentFlags().Lookup("http-address")) + _ = viper.BindPFlag("data_directory", startCmd.PersistentFlags().Lookup("data-directory")) + _ = viper.BindPFlag("peer_grpc_address", startCmd.PersistentFlags().Lookup("peer-grpc-address")) + _ = viper.BindPFlag("mapping_file", startCmd.PersistentFlags().Lookup("mapping-file")) + _ = viper.BindPFlag("certificate_file", startCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("key_file", startCmd.PersistentFlags().Lookup("key-file")) + _ = viper.BindPFlag("common_name", startCmd.PersistentFlags().Lookup("common-name")) + _ = viper.BindPFlag("log_level", startCmd.PersistentFlags().Lookup("log-level")) + _ = viper.BindPFlag("log_max_size", startCmd.PersistentFlags().Lookup("log-max-size")) + _ = viper.BindPFlag("log_max_backups", startCmd.PersistentFlags().Lookup("log-max-backups")) + _ = viper.BindPFlag("log_max_age", startCmd.PersistentFlags().Lookup("log-max-age")) + _ = viper.BindPFlag("log_compress", startCmd.PersistentFlags().Lookup("log-compress")) +} diff --git a/cmd/variables.go b/cmd/variables.go new file mode 100644 index 0000000..8022742 --- /dev/null +++ b/cmd/variables.go @@ -0,0 +1,22 @@ +package cmd + +var ( + configFile string + id string + raftAddress string + grpcAddress string + httpAddress string + dataDirectory string + peerGrpcAddress string + mappingFile string + certificateFile string + keyFile string + commonName string + file string + logLevel string + logFile string + logMaxSize int + logMaxBackups int + logMaxAge int + logCompress bool +) diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 0000000..01d8fa1 --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "fmt" + + "github.com/mosuka/blast/version" + "github.com/spf13/cobra" +) + +var ( + 
versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number", + Long: "Print the version number", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Printf("version: %s\n", version.Version) + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(versionCmd) +} diff --git a/cmd/watch.go b/cmd/watch.go new file mode 100644 index 0000000..da6be9f --- /dev/null +++ b/cmd/watch.go @@ -0,0 +1,157 @@ +package cmd + +import ( + "context" + "fmt" + "io" + "os" + "os/signal" + "syscall" + + "github.com/golang/protobuf/ptypes/empty" + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + watchCmd = &cobra.Command{ + Use: "watch", + Short: "Watch a node updates", + Long: "Watch a node updates", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &empty.Empty{} + watchClient, err := c.Watch(req) + if err != nil { + return err + } + + go func() { + for { + resp, err := watchClient.Recv() + if err == io.EOF { + break + } + if err != nil { + break + } + + switch resp.Event.Type { + case protobuf.Event_Join: + eventReq := &protobuf.SetMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.SetMetadataRequest) + } + } + fmt.Printf("%s, %v\n", 
resp.Event.Type.String(), eventReq) + case protobuf.Event_Leave: + eventReq := &protobuf.DeleteMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.DeleteMetadataRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) + case protobuf.Event_Set: + putRequest := &protobuf.SetRequest{} + if putRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if putRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + putRequest = putRequestInstance.(*protobuf.SetRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), putRequest) + case protobuf.Event_Delete: + deleteRequest := &protobuf.DeleteRequest{} + if deleteRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if deleteRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + deleteRequest = deleteRequestInstance.(*protobuf.DeleteRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), deleteRequest) + } + } + }() + + quitCh := make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + <-quitCh + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(watchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, 
err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + watchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + watchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + watchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + watchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", watchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", watchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", watchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go deleted file mode 100644 index 5ca4658..0000000 --- a/dispatcher/grpc_client.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/distribute" - "google.golang.org/grpc" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client distribute.DistributeClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) - if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: distribute.NewDistributeClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *distribute.NodeHealthCheckRequest, opts ...grpc.CallOption) (*distribute.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) 
-} - -func (c *GRPCClient) Get(req *distribute.GetRequest, opts ...grpc.CallOption) (*distribute.GetResponse, error) { - return c.client.Get(c.ctx, req, opts...) -} - -func (c *GRPCClient) Index(req *distribute.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Index(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *distribute.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Delete(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkIndex(req *distribute.BulkIndexRequest, opts ...grpc.CallOption) (*distribute.BulkIndexResponse, error) { - return c.client.BulkIndex(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkDelete(req *distribute.BulkDeleteRequest, opts ...grpc.CallOption) (*distribute.BulkDeleteResponse, error) { - return c.client.BulkDelete(c.ctx, req, opts...) -} - -func (c *GRPCClient) Search(req *distribute.SearchRequest, opts ...grpc.CallOption) (*distribute.SearchResponse, error) { - return c.client.Search(c.ctx, req, opts...) -} diff --git a/dispatcher/grpc_gateway.go b/dispatcher/grpc_gateway.go deleted file mode 100644 index f962b4e..0000000 --- a/dispatcher/grpc_gateway.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *distribute.GetResponse: - value, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "fields": value, - }, - ) - case *distribute.SearchResponse: - value, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "search_result": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". 
-func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.IndexRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - id, ok := tmpValue["id"].(string) - if ok { - v.(*distribute.IndexRequest).Id = id - } - - fields, ok := tmpValue["fields"] - if !ok { - return errors.New("value does not exist") - } - v.(*distribute.IndexRequest).Fields = &any.Any{} - return protobuf.UnmarshalAny(fields, v.(*distribute.IndexRequest).Fields) - case *distribute.SearchRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - var searchRequest *bleve.SearchRequest - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - v.(*distribute.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*distribute.SearchRequest).SearchRequest) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type JsonlMarshaler struct{} - -// ContentType always Returns "application/json". 
-func (*JsonlMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". -func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.BulkIndexRequest: - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - v.(*distribute.BulkIndexRequest).Documents = docs - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonlMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type TextMarshaler struct{} - -// ContentType always Returns "application/json". 
-func (*TextMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads text stream from "r". -func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.BulkDeleteRequest: - ids := make([]string, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - //idBytes, err := reader.ReadBytes('\n') - idBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - break - } - } - - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - } - v.(*distribute.BulkDeleteRequest).Ids = ids - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *TextMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), - runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := distribute.RegisterDistributeHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/dispatcher/grpc_server.go b/dispatcher/grpc_server.go deleted file mode 100644 index 7bc684e..0000000 --- a/dispatcher/grpc_server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/distribute" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service distribute.DistributeServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service distribute.DistributeServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - 
//grpc_recovery.UnaryServerInterceptor(), - )), - ) - - distribute.RegisterDistributeServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go deleted file mode 100644 index 0657119..0000000 --- a/dispatcher/grpc_service.go +++ /dev/null @@ -1,974 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "context" - "encoding/json" - "errors" - "hash/fnv" - "io" - "math/rand" - "sort" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/sortutils" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - managerGrpcAddress string - logger *zap.Logger - - managers *management.Cluster - managerClients map[string]*manager.GRPCClient - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} - - indexers map[string]*index.Cluster - indexerClients map[string]map[string]*indexer.GRPCClient - updateIndexersStopCh chan struct{} - updateIndexersDoneCh chan struct{} -} - -func NewGRPCService(managerGrpcAddress string, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - managerGrpcAddress: managerGrpcAddress, - logger: logger, - - managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - managerClients: make(map[string]*manager.GRPCClient, 0), - - indexers: make(map[string]*index.Cluster, 0), - indexerClients: make(map[string]map[string]*indexer.GRPCClient, 0), - }, nil -} - -func (s *GRPCService) Start() error { - var err error - s.managers, err = s.getManagerCluster(s.managerGrpcAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - for id, node := range s.managers.Nodes { - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) - } - s.managerClients[node.Id] = client - } - - 
s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - - s.logger.Info("start to update indexer cluster info") - go s.startUpdateIndexers(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - - s.logger.Info("stop to update indexer cluster info") - s.stopUpdateIndexers() - - return nil -} - -func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { - var client *manager.GRPCClient - - for id, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("id", id)) - continue - } - - if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { - var ok bool - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return res.Cluster, nil -} - -func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s 
*GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - // get client for manager from the list - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // create stream for watching cluster changes - req := &empty.Empty{} - stream, err := client.ClusterWatch(req) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Info("cluster has changed", zap.Any("resp", resp)) - switch resp.Event { - case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: - // add to cluster nodes - s.managers.Nodes[resp.Node.Id] = resp.Node - - // check node state - switch resp.Node.State { - case management.Node_UNKNOWN, management.Node_SHUTDOWN: - // close client - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - } - default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER - if resp.Node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := 
s.managerClients[resp.Node.Id]; !exist { - // create new client - s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } else { - if client.GetAddress() != resp.Node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - - // re-create new client - s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } - } - } - case management.ClusterWatchResponse_LEAVE: - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.managerClients, resp.Node.Id) - } - - if _, exist := s.managers.Nodes[resp.Node.Id]; exist { - delete(s.managers.Nodes, resp.Node.Id) - } - default: - s.logger.Debug("unknown event", zap.Any("event", resp.Event)) - continue - } - } - } -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range 
s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { - s.updateIndexersStopCh = make(chan struct{}) - s.updateIndexersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateIndexersDoneCh) - }() - - // get active client for manager - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - - // get initial indexers - req := &management.GetRequest{ - Key: "/cluster/shards", - } - res, err := client.Get(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - if res.Value == nil { - s.logger.Error("/cluster/shards is nil") - } - - shards, err := protobuf.MarshalAny(res.Value) - for shardId, shard := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shard) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - var cluster *index.Cluster - err = json.Unmarshal(shardBytes, &cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.indexers[shardId] = cluster - - for nodeId, node := range cluster.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - if _, exist := s.indexerClients[shardId]; !exist { - 
s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) - } - s.indexerClients[shardId][nodeId] = newClient - } - } - - for { - select { - case <-s.updateIndexersStopCh: - s.logger.Info("received a request to stop updating a indexer cluster") - return - default: - client, err = s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - watchReq := &management.WatchRequest{ - Key: "/cluster/shards/", - } - stream, err := client.Watch(watchReq) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a indexer cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - - getReq := &management.GetRequest{ - Key: "/cluster/shards/", - } - res, err := client.Get(getReq) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if res.Value == nil { - s.logger.Error("/cluster/shards is nil") - continue - } - - shards, err := protobuf.MarshalAny(res.Value) - for shardId, shard := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shard) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - var cluster *index.Cluster - err = json.Unmarshal(shardBytes, &cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.indexers[shardId] = cluster - - if _, exist := s.indexerClients[shardId]; !exist { - s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) - } - - // open clients for indexer nodes - for nodeId, node := range cluster.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := 
s.indexerClients[shardId][node.Id]; !exist { - // create new client - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.indexerClients[shardId][nodeId] = newClient - } else { - if client.GetAddress() != node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", node.Id)) - } - delete(s.indexerClients[shardId], node.Id) - - // re-create new client - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.indexerClients[shardId][nodeId] = newClient - } - } - } - - // close clients for non-existent indexer nodes - for id, client := range s.indexerClients[shardId] { - if _, exist := s.indexers[shardId].Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.indexerClients[shardId], id) - } - } - } - } - } -} - -func (s *GRPCService) stopUpdateIndexers() { - s.logger.Info("close all indexer clients") - for clusterId, cluster := range s.indexerClients { - for id, client := range cluster { - s.logger.Debug("close indexer client", zap.String("cluster_id", clusterId), zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - } - - if s.updateIndexersStopCh != nil { - s.logger.Info("send a request to stop updating a index cluster") - close(s.updateIndexersStopCh) - } - - 
s.logger.Info("wait for the indexer cluster update to stop") - <-s.updateIndexersDoneCh - s.logger.Info("the indexer cluster update has been stopped") -} - -func (s *GRPCService) getIndexerClients() map[string]*indexer.GRPCClient { - indexerClients := make(map[string]*indexer.GRPCClient, 0) - - for clusterId, cluster := range s.indexerClients { - nodeIds := make([]string, 0) - for nodeId := range cluster { - nodeIds = append(nodeIds, nodeId) - } - - // pick a client at random - nodeId := nodeIds[rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(nodeIds))] - - indexerClients[clusterId] = s.indexerClients[clusterId][nodeId] - } - - return indexerClients -} - -func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeHealthCheckRequest) (*distribute.NodeHealthCheckResponse, error) { - resp := &distribute.NodeHealthCheckResponse{} - - switch req.Probe { - case distribute.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case distribute.NodeHealthCheckRequest_HEALTHINESS: - resp.State = distribute.NodeHealthCheckResponse_HEALTHY - case distribute.NodeHealthCheckRequest_LIVENESS: - resp.State = distribute.NodeHealthCheckResponse_ALIVE - case distribute.NodeHealthCheckRequest_READINESS: - resp.State = distribute.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) Get(ctx context.Context, req *distribute.GetRequest) (*distribute.GetResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - res *index.GetResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - 
for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { - // index documents - req := &index.GetRequest{ - Id: id, - } - res, err := client.Get(req) - - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: res, - err: err, - } - }(clusterId, client, req.Id, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - iRes := &index.GetResponse{} - for r := range respChan { - if r.res != nil { - iRes = r.res - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - resp := &distribute.GetResponse{ - Fields: iRes.Fields, - } - - return resp, nil -} - -func (s *GRPCService) docIdHash(docId string) uint64 { - hash := fnv.New64() - _, err := hash.Write([]byte(docId)) - if err != nil { - return 0 - } - - return hash.Sum64() -} - -func (s *GRPCService) Index(ctx context.Context, req *distribute.IndexRequest) (*empty.Empty, error) { - res := &empty.Empty{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - docIdHash := s.docIdHash(req.Id) - clusterNum := uint64(len(indexerClients)) - clusterId := clusterIds[int(docIdHash%clusterNum)] - - iReq := &index.IndexRequest{ - Id: req.Id, - Fields: req.Fields, - } - - res, err := indexerClients[clusterId].Index(iReq) - if err != nil { - s.logger.Error(err.Error()) - return res, status.Error(codes.Internal, err.Error()) - } - - return res, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *distribute.DeleteRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - 
clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { - // index documents - iReq := &index.DeleteRequest{Id: id} - _, err := client.Delete(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - err: err, - } - }(clusterId, client, req.Id, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - for r := range respChan { - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkIndex(ctx context.Context, req *distribute.BulkIndexRequest) (*distribute.BulkIndexResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - // initialize document list for each cluster - docSet := make(map[string][]*index.Document, 0) - for _, clusterId := range clusterIds { - docSet[clusterId] = make([]*index.Document, 0) - } - - for _, doc := range req.Documents { - // distribute documents to each cluster based on document id - docIdHash := s.docIdHash(doc.Id) - clusterNum := uint64(len(indexerClients)) - clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], doc) - } - - type respVal struct { - clusterId string - res *index.BulkIndexResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, docs := range docSet { - wg.Add(1) - go func(clusterId string, docs []*index.Document, respChan chan 
respVal) { - iReq := &index.BulkIndexRequest{ - Documents: docs, - } - iRes, err := indexerClients[clusterId].BulkIndex(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: iRes, - err: err, - } - }(clusterId, docs, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := 0 - for r := range respChan { - if r.res.Count >= 0 { - totalCount += int(r.res.Count) - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // response - return &distribute.BulkIndexResponse{ - Count: int32(totalCount), - }, nil -} - -func (s *GRPCService) BulkDelete(ctx context.Context, req *distribute.BulkDeleteRequest) (*distribute.BulkDeleteResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - res *index.BulkDeleteResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, ids []string, respChan chan respVal) { - // index documents - iReq := &index.BulkDeleteRequest{ - Ids: ids, - } - iRes, err := client.BulkDelete(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: iRes, - err: err, - } - }(clusterId, client, req.Ids, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := 0 - for r := range respChan { - if r.res.Count >= 0 { - totalCount += int(r.res.Count) - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - // response - return &distribute.BulkDeleteResponse{ - Count: int32(totalCount), - }, nil 
-} - -func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { - start := time.Now() - - resp := &distribute.SearchResponse{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - searchResult *bleve.SearchResult - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - // create search request - ins, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - searchRequest := ins.(*bleve.SearchRequest) - - // change to distributed search request - from := searchRequest.From - size := searchRequest.Size - searchRequest.From = 0 - searchRequest.Size = from + size - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - respChan <- respVal{ - clusterId: clusterId, - searchResult: nil, - err: err, - } - return - } - - iReq := &index.SearchRequest{ - SearchRequest: searchRequestAny, - } - - iRes, err := client.Search(iReq) - - searchResult, err := protobuf.MarshalAny(iRes.SearchResult) - if err != nil { - respChan <- respVal{ - clusterId: clusterId, - searchResult: nil, - err: err, - } - return - } - - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - searchResult: searchResult.(*bleve.SearchResult), - err: err, - } - }(clusterId, client, searchRequest, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // revert to original search request - searchRequest.From = from - 
searchRequest.Size = size - - // summarize responses - var searchResult *bleve.SearchResult - for r := range respChan { - if r.searchResult != nil { - if searchResult == nil { - searchResult = r.searchResult - } else { - searchResult.Merge(r.searchResult) - } - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // handle case where no results were successful - if searchResult == nil { - searchResult = &bleve.SearchResult{ - Status: &bleve.SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - // sort all hits with the requested order - if len(searchRequest.Sort) > 0 { - sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) - sort.Sort(sorter) - } - - // now skip over the correct From - if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { - searchResult.Hits = searchResult.Hits[searchRequest.From:] - } else if searchRequest.From > 0 { - searchResult.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { - searchResult.Hits = searchResult.Hits[0:searchRequest.Size] - } - - // fix up facets - for name, fr := range searchRequest.Facets { - searchResult.Facets.Fixup(name, fr.Size) - } - - // fix up original request - searchResult.Request = searchRequest - searchDuration := time.Since(start) - searchResult.Took = searchDuration - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.SearchResult = searchResultAny - - return resp, nil -} diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go deleted file mode 100644 index 3e2ec1b..0000000 --- a/dispatcher/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/dispatcher/http_server.go b/dispatcher/http_server.go deleted file mode 100644 index 5d3fbda..0000000 --- a/dispatcher/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the 
Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/dispatcher/server.go b/dispatcher/server.go deleted file mode 100644 index 529401e..0000000 --- a/dispatcher/server.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type Server struct { - managerGrpcAddress string - grpcAddress string - grpcGatewayAddress string - httpAddress string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(managerGrpcAddress string, grpcAddress string, grpcGatewayAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - managerGrpcAddress: managerGrpcAddress, - grpcAddress: grpcAddress, - grpcGatewayAddress: grpcGatewayAddress, - httpAddress: httpAddress, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // create gRPC service - s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.grpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.grpcGatewayAddress, s.grpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - 
s.httpServer, err = NewHTTPServer(s.httpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go deleted file mode 100644 index dd727d7..0000000 --- a/dispatcher/server_test.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("INFO", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerPeerGrpcAddress1 := "" - managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId1 := "manager1" - managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir1 := testutils.TmpDir() - managerRaftStorageType1 := "boltdb" - - managerNode1 := &management.Node{ - Id: managerNodeId1, - BindAddress: managerBindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - GrpcGatewayAddress: managerGrpcGatewayAddress1, - HttpAddress: managerHttpAddress1, - }, - } - - managerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if 
err != nil { - t.Fatalf("%v", err) - } - managerIndexType1 := "upside_down" - managerIndexStorageType1 := "boltdb" - - // create server - managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexMapping1, managerIndexType1, managerIndexStorageType1, logger.Named(managerNodeId1), grpcLogger.Named(managerNodeId1), httpAccessLogger) - defer func() { - if managerServer1 != nil { - managerServer1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - managerServer1.Start() - - // sleep - time.Sleep(5 * time.Second) - - managerPeerGrpcAddress2 := managerGrpcAddress1 - managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId2 := "manager2" - managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir2 := testutils.TmpDir() - managerRaftStorageType2 := "boltdb" - - managerNode2 := &management.Node{ - Id: managerNodeId2, - BindAddress: managerBindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - GrpcGatewayAddress: managerGrpcGatewayAddress2, - HttpAddress: managerHttpAddress2, - }, - } - - managerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - managerIndexType2 := "upside_down" - managerIndexStorageType2 := "boltdb" - - // create server - managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexMapping2, managerIndexType2, managerIndexStorageType2, logger.Named(managerNodeId2), grpcLogger.Named(managerNodeId2), httpAccessLogger) - defer func() { - if managerServer2 != nil { - managerServer2.Stop() - } - }() - if err != nil { - 
t.Fatalf("%v", err) - } - - // start server - managerServer2.Start() - - // sleep - time.Sleep(5 * time.Second) - - managerPeerGrpcAddress3 := managerGrpcAddress1 - managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId3 := "manager3" - managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir3 := testutils.TmpDir() - managerRaftStorageType3 := "boltdb" - - managerNode3 := &management.Node{ - Id: managerNodeId3, - BindAddress: managerBindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - GrpcGatewayAddress: managerGrpcGatewayAddress3, - HttpAddress: managerHttpAddress3, - }, - } - - managerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - managerIndexType3 := "upside_down" - managerIndexStorageType3 := "boltdb" - - // create server - managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexMapping3, managerIndexType3, managerIndexStorageType3, logger.Named(managerNodeId3), grpcLogger.Named(managerNodeId3), httpAccessLogger) - defer func() { - if managerServer3 != nil { - managerServer3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - managerServer3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - managerClient1, err := manager.NewGRPCClient(managerNode1.Metadata.GrpcAddress) - defer func() { - _ = managerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - resClusterInfo, err := managerClient1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expManagerCluster1 := &management.Cluster{ - 
Nodes: map[string]*management.Node{ - managerNodeId1: { - Id: managerNodeId1, - BindAddress: managerBindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - GrpcGatewayAddress: managerGrpcGatewayAddress1, - HttpAddress: managerHttpAddress1, - }, - }, - managerNodeId2: { - Id: managerNodeId2, - BindAddress: managerBindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - GrpcGatewayAddress: managerGrpcGatewayAddress2, - HttpAddress: managerHttpAddress2, - }, - }, - managerNodeId3: { - Id: managerNodeId3, - BindAddress: managerBindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - GrpcGatewayAddress: managerGrpcGatewayAddress3, - HttpAddress: managerHttpAddress3, - }, - }, - }, - } - actManagerCluster1 := resClusterInfo.Cluster - if !reflect.DeepEqual(expManagerCluster1, actManagerCluster1) { - t.Fatalf("expected content to see %v, saw %v", expManagerCluster1, actManagerCluster1) - } - - // - // indexer cluster1 - // - indexerManagerGrpcAddress1 := managerGrpcAddress1 - indexerShardId1 := "shard1" - indexerPeerGrpcAddress1 := "" - indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId1 := "indexer1" - indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir1) - }() - indexerRaftStorageType1 := "boltdb" - - indexerNode1 := &index.Node{ - Id: indexerNodeId1, - BindAddress: indexerBindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - GrpcGatewayAddress: indexerGrpcGatewayAddress1, - HttpAddress: indexerHttpAddress1, - }, - } - indexerIndexMapping1, err := 
indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType1 := "upside_down" - indexerIndexStorageType1 := "boltdb" - indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexerIndexMapping1, indexerIndexType1, indexerIndexStorageType1, logger.Named(indexerNodeId1), grpcLogger.Named(indexerNodeId1), httpAccessLogger) - defer func() { - indexerServer1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer1.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress2 := managerGrpcAddress1 - indexerShardId2 := "shard1" - indexerPeerGrpcAddress2 := "" - indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId2 := "indexer2" - indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir2) - }() - indexerRaftStorageType2 := "boltdb" - - indexerNode2 := &index.Node{ - Id: indexerNodeId2, - BindAddress: indexerBindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - GrpcGatewayAddress: indexerGrpcGatewayAddress2, - HttpAddress: indexerHttpAddress2, - }, - } - indexerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType2 := "upside_down" - indexerIndexStorageType2 := "boltdb" - indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexerIndexMapping2, indexerIndexType2, 
indexerIndexStorageType2, logger.Named(indexerNodeId2), grpcLogger.Named(indexerNodeId2), httpAccessLogger) - defer func() { - indexerServer2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer2.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress3 := managerGrpcAddress1 - indexerShardId3 := "shard1" - indexerPeerGrpcAddress3 := "" - indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId3 := "indexer3" - indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir3) - }() - indexerRaftStorageType3 := "boltdb" - - indexerNode3 := &index.Node{ - Id: indexerNodeId3, - BindAddress: indexerBindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - GrpcGatewayAddress: indexerGrpcGatewayAddress3, - HttpAddress: indexerHttpAddress3, - }, - } - indexerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType3 := "upside_down" - indexerIndexStorageType3 := "boltdb" - indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexerIndexMapping3, indexerIndexType3, indexerIndexStorageType3, logger.Named(indexerNodeId3), grpcLogger.Named(indexerNodeId3), httpAccessLogger) - defer func() { - indexerServer3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - indexerClient1, err := indexer.NewGRPCClient(indexerNode1.Metadata.GrpcAddress) - defer func() { - _ = indexerClient1.Close() - }() - if err != 
nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - resClusterInfoIndexer1, err := indexerClient1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster1 := &index.Cluster{ - Nodes: map[string]*index.Node{ - indexerNodeId1: { - Id: indexerNodeId1, - BindAddress: indexerBindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - GrpcGatewayAddress: indexerGrpcGatewayAddress1, - HttpAddress: indexerHttpAddress1, - }, - }, - indexerNodeId2: { - Id: indexerNodeId2, - BindAddress: indexerBindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - GrpcGatewayAddress: indexerGrpcGatewayAddress2, - HttpAddress: indexerHttpAddress2, - }, - }, - indexerNodeId3: { - Id: indexerNodeId3, - BindAddress: indexerBindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - GrpcGatewayAddress: indexerGrpcGatewayAddress3, - HttpAddress: indexerHttpAddress3, - }, - }, - }, - } - actIndexerCluster1 := resClusterInfoIndexer1.Cluster - if !cmp.Equal(expIndexerCluster1, actIndexerCluster1) { - t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) - } - - // - // indexer cluster2 - // - indexerManagerGrpcAddress4 := managerGrpcAddress1 - indexerShardId4 := "shard2" - indexerPeerGrpcAddress4 := "" - indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId4 := "indexer4" - indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir4 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir4) - }() - indexerRaftStorageType4 := "boltdb" - - indexerNode4 := &index.Node{ - Id: indexerNodeId4, - BindAddress: indexerBindAddress4, - State: index.Node_UNKNOWN, - Metadata: 
&index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - GrpcGatewayAddress: indexerGrpcGatewayAddress4, - HttpAddress: indexerHttpAddress4, - }, - } - indexerIndexMapping4, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType4 := "upside_down" - indexerIndexStorageType4 := "boltdb" - indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexerIndexMapping4, indexerIndexType4, indexerIndexStorageType4, logger.Named(indexerNodeId4), grpcLogger.Named(indexerNodeId4), httpAccessLogger) - defer func() { - indexerServer4.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer4.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress5 := managerGrpcAddress1 - indexerShardId5 := "shard2" - indexerPeerGrpcAddress5 := "" - indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId5 := "indexer5" - indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir5 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir5) - }() - indexerRaftStorageType5 := "boltdb" - - indexerNode5 := &index.Node{ - Id: indexerNodeId5, - BindAddress: indexerBindAddress5, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - GrpcGatewayAddress: indexerGrpcGatewayAddress5, - HttpAddress: indexerHttpAddress5, - }, - } - indexerIndexMapping5, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType5 := "upside_down" - indexerIndexStorageType5 := "boltdb" - indexerServer5, err := 
indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexerIndexMapping5, indexerIndexType5, indexerIndexStorageType5, logger.Named(indexerNodeId5), grpcLogger.Named(indexerNodeId5), httpAccessLogger) - defer func() { - indexerServer5.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer5.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress6 := managerGrpcAddress1 - indexerShardId6 := "shard2" - indexerPeerGrpcAddress6 := "" - indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId6 := "indexer6" - indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir6 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir6) - }() - indexerRaftStorageType6 := "boltdb" - - indexerNode6 := &index.Node{ - Id: indexerNodeId6, - BindAddress: indexerBindAddress6, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - GrpcGatewayAddress: indexerGrpcGatewayAddress6, - HttpAddress: indexerHttpAddress6, - }, - } - indexerIndexMapping6, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType6 := "upside_down" - indexerIndexStorageType6 := "boltdb" - indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexerIndexMapping6, indexerIndexType6, indexerIndexStorageType6, logger.Named(indexerNodeId6), grpcLogger.Named(indexerNodeId6), httpAccessLogger) - defer func() { - indexerServer6.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer6.Start() - - // sleep - time.Sleep(5 * 
time.Second) - - // gRPC client for manager1 - indexerClient2, err := indexer.NewGRPCClient(indexerNode4.Metadata.GrpcAddress) - defer func() { - _ = indexerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - indexerCluster2, err := indexerClient2.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster2 := &index.Cluster{ - Nodes: map[string]*index.Node{ - indexerNodeId4: { - Id: indexerNodeId4, - BindAddress: indexerBindAddress4, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - GrpcGatewayAddress: indexerGrpcGatewayAddress4, - HttpAddress: indexerHttpAddress4, - }, - }, - indexerNodeId5: { - Id: indexerNodeId5, - BindAddress: indexerBindAddress5, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - GrpcGatewayAddress: indexerGrpcGatewayAddress5, - HttpAddress: indexerHttpAddress5, - }, - }, - indexerNodeId6: { - Id: indexerNodeId6, - BindAddress: indexerBindAddress6, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - GrpcGatewayAddress: indexerGrpcGatewayAddress6, - HttpAddress: indexerHttpAddress6, - }, - }, - }, - } - actIndexerCluster2 := indexerCluster2.Cluster - if !reflect.DeepEqual(expIndexerCluster2, actIndexerCluster2) { - t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) - } - - // - // dispatcher - // - dispatcherManagerGrpcAddress := managerGrpcAddress1 - dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcherGrpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - - dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherGrpcGatewayAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) - defer func() { - 
dispatcher1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - dispatcher1.Start() - - // sleep - time.Sleep(5 * time.Second) -} diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 4d358be..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,221 +0,0 @@ -version: '3.4' - -networks: - blast-cluster: - driver: bridge - -services: - manager1: - container_name: manager1 - image: mosuka/blast:latest - restart: always - ports: - - 2110:2110 - - 5110:5110 - - 6110:6110 - - 8110:8110 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - command: > - blast manager start - --node-id=blast-manager1 - --node-address=manager1:2110 - --grpc-address=manager1:5110 - --grpc-gateway-address=manager1:6110 - --http-address=manager1:8110 - --data-dir=/tmp/blast/manager1 - --raft-storage-type=boltdb - --index-mapping-file=/opt/blast/example/wiki_index_mapping.json - --index-type=scorch - --index-storage-type=scorch - - indexer1: - container_name: indexer1 - image: mosuka/blast:latest - restart: always - ports: - - 2010:2010 - - 5010:5010 - - 6010:6010 - - 8010:8010 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer1 - --node-address=indexer1:2010 - --grpc-address=indexer1:5010 - --grpc-gateway-address=indexer1:6010 - --http-address=indexer1:8010 - --data-dir=/tmp/blast/indexer1 - --raft-storage-type=boltdb - - indexer2: - container_name: indexer2 - image: mosuka/blast:latest - restart: always - ports: - - 2020:2020 - - 5020:5020 - - 6020:6020 - - 8020:8020 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer2 - --node-address=indexer2:2020 - --grpc-address=indexer2:5020 - 
--grpc-gateway-address=indexer2:6020 - --http-address=indexer2:8020 - --data-dir=/tmp/blast/indexer2 - --raft-storage-type=boltdb - - indexer3: - container_name: indexer3 - image: mosuka/blast:latest - restart: always - ports: - - 2030:2030 - - 5030:5030 - - 6030:6030 - - 8030:8030 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer3 - --node-address=indexer3:2030 - --grpc-address=indexer3:5030 - --grpc-gateway-address=indexer3:6030 - --http-address=indexer3:8030 - --data-dir=/tmp/blast/indexer3 - --raft-storage-type=boltdb - - indexer4: - container_name: indexer4 - image: mosuka/blast:latest - restart: always - ports: - - 2040:2040 - - 5040:5040 - - 6040:6040 - - 8040:8040 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer4 - --node-address=indexer4:2040 - --grpc-address=indexer4:5040 - --grpc-gateway-address=indexer4:6040 - --http-address=indexer4:8040 - --data-dir=/tmp/blast/indexer4 - --raft-storage-type=boltdb - - indexer5: - container_name: indexer5 - image: mosuka/blast:latest - restart: always - ports: - - 2050:2050 - - 5050:5050 - - 6050:6050 - - 8050:8050 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer5 - --node-address=indexer5:2050 - --grpc-address=indexer5:5050 - --grpc-gateway-address=indexer5:6050 - --http-address=indexer5:8050 - --data-dir=/tmp/blast/indexer5 - --raft-storage-type=boltdb - - indexer6: - container_name: indexer6 - image: mosuka/blast:latest - restart: always - ports: - - 2060:2060 - - 5060:5060 - - 6060:6060 - - 8060:8060 - 
networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer6 - --node-address=indexer6:2060 - --grpc-address=indexer6:5060 - --grpc-gateway-address=indexer6:6060 - --http-address=indexer6:8060 - --data-dir=/tmp/blast/indexer6 - --raft-storage-type=boltdb - - dispatcher1: - container_name: dispatcher1 - image: mosuka/blast:latest - restart: always - ports: - - 5210:5210 - - 6210:6210 - - 8210:8210 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - - indexer1 - - indexer2 - - indexer3 - - indexer4 - - indexer5 - - indexer6 - command: > - blast dispatcher start - --manager-grpc-address=manager1:5110 - --grpc-address=dispatcher1:5210 - --grpc-gateway-address=dispatcher1:6210 - --http-address=dispatcher1:8210 diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100755 index 1cf687a..0000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e - -exec "$@" diff --git a/errors/errors.go b/errors/errors.go index cc538f6..fcdf16f 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -1,23 +1,15 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package errors import "errors" var ( - ErrNotFoundLeader = errors.New("does not found leader") - ErrNotFound = errors.New("not found") - ErrTimeout = errors.New("timeout") + ErrNotFoundLeader = errors.New("does not found leader") + ErrNodeAlreadyExists = errors.New("node already exists") + ErrNodeDoesNotExist = errors.New("node does not exist") + ErrNodeNotReady = errors.New("node not ready") + ErrNotFound = errors.New("not found") + ErrTimeout = errors.New("timeout") + ErrNoUpdate = errors.New("no update") + ErrNil = errors.New("data is nil") + ErrUnsupportedEvent = errors.New("unsupported event") ) diff --git a/etc/blast.yaml b/etc/blast.yaml new file mode 100644 index 0000000..ab362c1 --- /dev/null +++ b/etc/blast.yaml @@ -0,0 +1,16 @@ +id: "node1" +raft_address: ":7000" +grpc_address: ":9000" +http_address: ":8000" +data_directory: "/tmp/blast/node1/data" +#mapping_file: "./etc/blast_mapping.json" +peer_grpc_address: "" +#certificate_file: "./etc/blast-cert.pem" +#key_file: "./etc/blast-key.pem" +#common_name: "localhost" +log_level: "INFO" +log_file: "" +#log_max_size: 500 +#log_max_backups: 3 +#log_max_age: 30 +#log_compress: false diff --git a/example/enwiki_index_mapping.json b/etc/blast_mapping.json 
similarity index 97% rename from example/enwiki_index_mapping.json rename to etc/blast_mapping.json index 2ef6200..118348c 100644 --- a/example/enwiki_index_mapping.json +++ b/etc/blast_mapping.json @@ -1,10 +1,10 @@ { "types": { - "enwiki": { + "example": { "enabled": true, "dynamic": true, "properties": { - "title_en": { + "title": { "enabled": true, "dynamic": true, "fields": [ @@ -19,7 +19,7 @@ ], "default_analyzer": "en" }, - "text_en": { + "text": { "enabled": true, "dynamic": true, "fields": [ diff --git a/example/geo_doc_2.json b/example/geo_doc_2.json deleted file mode 100644 index 0ca3e13..0000000 --- a/example/geo_doc_2.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "2", - "fields": { - "name": "Capital City Brewing Company", - "city": "Washington", - "state": "District of Columbia", - "code": "20005", - "country": "United States", - "phone": "202.628.2222", - "website": "http://www.capcitybrew.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. 
Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.", - "address": [ - "1100 New York Ave, NW" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.8999, - "lon": -77.0272 - } - } -} diff --git a/example/geo_doc_3.json b/example/geo_doc_3.json deleted file mode 100644 index 98c79c5..0000000 --- a/example/geo_doc_3.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "3", - "fields": { - "name": "Firehouse Grill & Brewery", - "city": "Sunnyvale", - "state": "California", - "code": "94086", - "country": "United States", - "phone": "1-408-773-9500", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "111 South Murphy Avenue" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 37.3775, - "lon": -122.03 - } - } -} diff --git a/example/geo_doc_4.json b/example/geo_doc_4.json deleted file mode 100644 index fcdc08a..0000000 --- a/example/geo_doc_4.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "4", - "fields": { - "name": "Hook & Ladder Brewing Company", - "city": "Silver Spring", - "state": "Maryland", - "code": "20910", - "country": "United States", - "phone": "301.565.4522", - "website": "http://www.hookandladderbeer.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.", - "address": [ - "8113 Fenton St." 
- ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.9911, - "lon": -77.0237 - } - } -} diff --git a/example/geo_doc_5.json b/example/geo_doc_5.json deleted file mode 100644 index e2e6807..0000000 --- a/example/geo_doc_5.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "5", - "fields": { - "name": "Jack's Brewing", - "city": "Fremont", - "state": "California", - "code": "94538", - "country": "United States", - "phone": "1-510-796-2036", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "39176 Argonaut Way" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 37.5441, - "lon": -121.988 - } - } -} diff --git a/example/geo_doc_6.json b/example/geo_doc_6.json deleted file mode 100644 index 8ecc9bb..0000000 --- a/example/geo_doc_6.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "6", - "fields": { - "name": "Sweet Water Tavern and Brewery", - "city": "Sterling", - "state": "Virginia", - "code": "20121", - "country": "United States", - "phone": "(703) 449-1108", - "website": "http://www.greatamericanrestaurants.com/sweetMainSter/index.htm", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "45980 Waterview Plaza" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 39.0324, - "lon": -77.4097 - } - } -} diff --git a/example/wiki_bulk_delete.txt b/example/wiki_bulk_delete.txt deleted file mode 100644 index 8928994..0000000 --- a/example/wiki_bulk_delete.txt +++ /dev/null @@ -1,36 +0,0 @@ -arwiki_1 -bgwiki_1 -cawiki_1 -cswiki_1 -dawiki_1 -dewiki_1 -elwiki_1 -enwiki_1 -eswiki_1 -fawiki_1 -fiwiki_1 -frwiki_1 -gawiki_1 -glwiki_1 -guwiki_1 -hiwiki_1 -huwiki_1 -hywiki_1 -idwiki_1 -itwiki_1 -jawiki_1 -knwiki_1 -kowiki_1 -mlwiki_1 -nlwiki_1 -nowiki_1 -pswiki_1 -ptwiki_1 -rowiki_1 -ruwiki_1 -svwiki_1 -tawiki_1 -tewiki_1 -thwiki_1 -trwiki_1 -zhwiki_1 diff --git a/example/wiki_bulk_index.jsonl b/example/wiki_bulk_index.jsonl deleted file mode 100644 index 32a0fbd..0000000 --- 
a/example/wiki_bulk_index.jsonl +++ /dev/null @@ -1,36 +0,0 @@ -{"id":"arwiki_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"arwiki"}} -{"id":"bgwiki_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bgwiki"}} -{"id":"cawiki_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. 
L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"cawiki"}} -{"id":"cswiki_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cswiki"}} -{"id":"dawiki_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"dawiki"}} -{"id":"dewiki_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"dewiki"}} -{"id":"elwiki_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"elwiki"}} -{"id":"enwiki_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"enwiki"}} -{"id":"eswiki_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"eswiki"}} -{"id":"fawiki_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! 
جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fawiki"}} -{"id":"fiwiki_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fiwiki"}} -{"id":"frwiki_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. 
Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"frwiki"}} -{"id":"gawiki_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"gawiki"}} -{"id":"glwiki_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. 
As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"glwiki"}} -{"id":"guwiki_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"guwiki"}} -{"id":"hiwiki_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hiwiki"}} -{"id":"huwiki_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. 
Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"huwiki"}} -{"id":"hywiki_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, 
սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hywiki"}} -{"id":"idwiki_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"idwiki"}} -{"id":"itwiki_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"itwiki"}} -{"id":"jawiki_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"jawiki"}} -{"id":"knwiki_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"knwiki"}} -{"id":"kowiki_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"kowiki"}} -{"id":"mlwiki_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"mlwiki"}} -{"id":"nlwiki_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nlwiki"}} -{"id":"nowiki_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"nowiki"}} -{"id":"pswiki_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"pswiki"}} -{"id":"ptwiki_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"ptwiki"}} -{"id":"rowiki_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"rowiki"}} -{"id":"ruwiki_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ruwiki"}} -{"id":"svwiki_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"svwiki"}} -{"id":"tawiki_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"tawiki"}} -{"id":"tewiki_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"tewiki"}} -{"id":"thwiki_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"thwiki"}} -{"id":"trwiki_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"trwiki"}} -{"id":"zhwiki_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zhwiki"}} diff --git a/example/wiki_doc_arwiki_1.json b/example/wiki_doc_arwiki_1.json deleted file mode 100644 index fdbdac0..0000000 --- a/example/wiki_doc_arwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "arwiki_1", - "fields": { - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" - } -} diff --git a/example/wiki_doc_bgwiki_1.json b/example/wiki_doc_bgwiki_1.json deleted file mode 100644 index 3ad2735..0000000 --- a/example/wiki_doc_bgwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "bgwiki_1", - "fields": { - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). 
В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" - } -} diff --git a/example/wiki_doc_cawiki_1.json b/example/wiki_doc_cawiki_1.json deleted file mode 100644 index ffb67e6..0000000 --- a/example/wiki_doc_cawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "cawiki_1", - "fields": { - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. 
Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" - } -} diff --git a/example/wiki_doc_cswiki_1.json b/example/wiki_doc_cswiki_1.json deleted file mode 100644 index 89c994a..0000000 --- a/example/wiki_doc_cswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "cswiki_1", - "fields": { - "title_cs": "Vyhledávač", - "text_cs": "Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.", - "timestamp": "2017-11-10T21:59:00Z", - "_type": "cswiki" - } -} diff --git a/example/wiki_doc_dawiki_1.json b/example/wiki_doc_dawiki_1.json deleted file mode 100644 index ff1ee22..0000000 --- a/example/wiki_doc_dawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "dawiki_1", - "fields": { - "title_da": "Søgemaskine", - "text_da": "En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dawiki" - } -} diff --git a/example/wiki_doc_dewiki_1.json b/example/wiki_doc_dewiki_1.json deleted file mode 100644 index c5f0a83..0000000 --- a/example/wiki_doc_dewiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "dewiki_1", - "fields": { - "title_de": "Suchmaschine", - "text_de": "Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dewiki" - } -} diff --git a/example/wiki_doc_elwiki_1.json b/example/wiki_doc_elwiki_1.json deleted file mode 100644 index 42f143b..0000000 --- a/example/wiki_doc_elwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "elwiki_1", - "fields": { - "title_el": "Μηχανή αναζήτησης", - "text_el": "Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. 
Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.", - "timestamp": "2017-11-21T19:57:00Z", - "_type": "elwiki" - } -} diff --git a/example/wiki_doc_enwiki_1.json b/example/wiki_doc_enwiki_1.json deleted file mode 100644 index bcb7d18..0000000 --- a/example/wiki_doc_enwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "enwiki_1", - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" - } -} diff --git a/example/wiki_doc_eswiki_1.json b/example/wiki_doc_eswiki_1.json deleted file mode 100644 index 5d3c7aa..0000000 --- a/example/wiki_doc_eswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "eswiki_1", - "fields": { - "title_es": "Motor de búsqueda", - "text_es": "Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. 
Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.", - "timestamp": "2018-08-30T11:30:00Z", - "_type": "eswiki" - } -} diff --git a/example/wiki_doc_fawiki_1.json b/example/wiki_doc_fawiki_1.json deleted file mode 100644 index 093cc83..0000000 --- a/example/wiki_doc_fawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "fawiki_1", - "fields": { - "title_fa": "موتور جستجو (پردازش)", - "text_fa": "موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.", - "timestamp": "2017-01-06T02:46:00Z", - "_type": "fawiki" - } -} diff --git a/example/wiki_doc_fiwiki_1.json b/example/wiki_doc_fiwiki_1.json deleted file mode 100644 index e930816..0000000 --- a/example/wiki_doc_fiwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "fiwiki_1", - "fields": { - "title_fi": "Hakukone", - "text_fi": "Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. 
Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.", - "timestamp": "2017-10-04T14:33:00Z", - "_type": "fiwiki" - } -} diff --git a/example/wiki_doc_frwiki_1.json b/example/wiki_doc_frwiki_1.json deleted file mode 100644 index 4090cd0..0000000 --- a/example/wiki_doc_frwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "frwiki_1", - "fields": { - "title_fr": "Moteur de recherche", - "text_fr": "Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. 
Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.", - "timestamp": "2018-05-30T15:15:00Z", - "_type": "frwiki" - } -} diff --git a/example/wiki_doc_gawiki_1.json b/example/wiki_doc_gawiki_1.json deleted file mode 100644 index ad69390..0000000 --- a/example/wiki_doc_gawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "gawiki_1", - "fields": { - "title_ga": "Inneall cuardaigh", - "text_ga": "Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. 
Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.", - "timestamp": "2013-10-27T18:17:00Z", - "_type": "gawiki" - } -} diff --git a/example/wiki_doc_glwiki_1.json b/example/wiki_doc_glwiki_1.json deleted file mode 100644 index 667e187..0000000 --- a/example/wiki_doc_glwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "glwiki_1", - "fields": { - "title_gl": "Motor de busca", - "text_gl": "Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.", - "timestamp": "2016-10-31T13:33:00Z", - "_type": "glwiki" - } -} diff --git a/example/wiki_doc_guwiki_1.json b/example/wiki_doc_guwiki_1.json deleted file mode 100644 index a0afc9b..0000000 --- a/example/wiki_doc_guwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "guwiki_1", - "fields": { - "title_gu": "વેબ શોધ એન્જીન", - "text_gu": "વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. 
વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.", - "timestamp": "2013-04-04T19:28:00Z", - "_type": "guwiki" - } -} diff --git a/example/wiki_doc_hiwiki_1.json b/example/wiki_doc_hiwiki_1.json deleted file mode 100644 index 494a176..0000000 --- a/example/wiki_doc_hiwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "hiwiki_1", - "fields": { - "title_hi": "खोज इंजन", - "text_hi": "ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।", - "timestamp": "2017-10-19T20:09:00Z", - "_type": "hiwiki" - } -} diff --git a/example/wiki_doc_huwiki_1.json b/example/wiki_doc_huwiki_1.json deleted file mode 100644 index 95f97a0..0000000 --- a/example/wiki_doc_huwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "huwiki_1", - "fields": { - "title_hu": "Keresőmotor", - "text_hu": "A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.", - "timestamp": "2018-05-15T20:40:00Z", - "_type": "huwiki" - } -} diff --git a/example/wiki_doc_hywiki_1.json b/example/wiki_doc_hywiki_1.json deleted file mode 100644 index 0e36b1a..0000000 --- a/example/wiki_doc_hywiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "hywiki_1", - "fields": { - "title_hy": "Որոնողական համակարգ", - "text_hy": "Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են 
ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "hywiki" - } -} diff --git a/example/wiki_doc_idwiki_1.json b/example/wiki_doc_idwiki_1.json deleted file mode 100644 index 16e5802..0000000 --- a/example/wiki_doc_idwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "idwiki_1", - "fields": { - "title_id": "Mesin pencari web", - "text_id": "Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. 
Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "idwiki" - } -} diff --git a/example/wiki_doc_itwiki_1.json b/example/wiki_doc_itwiki_1.json deleted file mode 100644 index b8bdd5d..0000000 --- a/example/wiki_doc_itwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "itwiki_1", - "fields": { - "title_it": "Motore di ricerca", - "text_it": "Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. 
I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.", - "timestamp": "2018-07-16T12:20:00Z", - "_type": "itwiki" - } -} diff --git a/example/wiki_doc_jawiki_1.json b/example/wiki_doc_jawiki_1.json deleted file mode 100644 index 264ff02..0000000 --- a/example/wiki_doc_jawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "jawiki_1", - "fields": { - "title_ja": "検索エンジン", - "text_ja": "検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。", - "timestamp": "2018-05-30T00:52:00Z", - "_type": "jawiki" - } -} diff --git a/example/wiki_doc_knwiki_1.json b/example/wiki_doc_knwiki_1.json deleted file mode 100644 index a24e9cc..0000000 --- a/example/wiki_doc_knwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "knwiki_1", - "fields": { - "title_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ", - "text_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. 
ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.", - "timestamp": "2017-10-03T14:13:00Z", - "_type": "knwiki" - } -} diff --git a/example/wiki_doc_kowiki_1.json b/example/wiki_doc_kowiki_1.json deleted file mode 100644 index 3a612fe..0000000 --- a/example/wiki_doc_kowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "kowiki_1", - "fields": { - "title_cjk": "검색 엔진", - "text_cjk": "검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.", - "timestamp": "2017-11-19T12:50:00Z", - "_type": "kowiki" - } -} diff --git a/example/wiki_doc_mlwiki_1.json b/example/wiki_doc_mlwiki_1.json deleted file mode 100644 index 09c633b..0000000 --- a/example/wiki_doc_mlwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "mlwiki_1", - "fields": { - "title_ml": "വെബ് സെർച്ച് എഞ്ചിൻ", - "text_ml": "വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.", - "timestamp": "2010-05-05T15:06:00Z", - "_type": "mlwiki" - } -} diff --git a/example/wiki_doc_nlwiki_1.json b/example/wiki_doc_nlwiki_1.json deleted file mode 100644 index 0b2a52f..0000000 --- a/example/wiki_doc_nlwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "nlwiki_1", - "fields": { - "title_nl": "Zoekmachine", - "text_nl": "Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. 
Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.", - "timestamp": "2018-05-07T11:05:00Z", - "_type": "nlwiki" - } -} diff --git a/example/wiki_doc_nowiki_1.json b/example/wiki_doc_nowiki_1.json deleted file mode 100644 index 39d5a35..0000000 --- a/example/wiki_doc_nowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "nowiki_1", - "fields": { - "title_no": "Søkemotor", - "text_no": "En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). 
En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.", - "timestamp": "2018-02-05T14:15:00Z", - "_type": "nowiki" - } -} diff --git a/example/wiki_doc_pswiki_1.json b/example/wiki_doc_pswiki_1.json deleted file mode 100644 index 645fa9e..0000000 --- a/example/wiki_doc_pswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "pswiki_1", - "fields": { - "title_ps": "انټرنټ لټوونکی ماشين", - "text_ps": "نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. 
نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.", - "timestamp": "2015-12-15T18:53:00Z", - "_type": "pswiki" - } -} diff --git a/example/wiki_doc_ptwiki_1.json b/example/wiki_doc_ptwiki_1.json deleted file mode 100644 index b79cbb6..0000000 --- a/example/wiki_doc_ptwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "ptwiki_1", - "fields": { - "title_pt": "Motor de busca", - "text_pt": "Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. 
Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).", - "timestamp": "2017-11-09T14:38:00Z", - "_type": "ptwiki" - } -} diff --git a/example/wiki_doc_rowiki_1.json b/example/wiki_doc_rowiki_1.json deleted file mode 100644 index 7562616..0000000 --- a/example/wiki_doc_rowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "rowiki_1", - "fields": { - "title_ro": "Motor de căutare", - "text_ro": "Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. 
În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).", - "timestamp": "2018-06-12T08:59:00Z", - "_type": "rowiki" - } -} diff --git a/example/wiki_doc_ruwiki_1.json b/example/wiki_doc_ruwiki_1.json deleted file mode 100644 index 818b84f..0000000 --- a/example/wiki_doc_ruwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "ruwiki_1", - "fields": { - "title_ru": "Поисковая машина", - "text_ru": "Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.", - "timestamp": "2017-03-22T01:16:00Z", - "_type": "ruwiki" - } -} diff --git a/example/wiki_doc_svwiki_1.json b/example/wiki_doc_svwiki_1.json deleted file mode 100644 index 4c9210e..0000000 --- a/example/wiki_doc_svwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "svwiki_1", - "fields": { - "title_sv": "Söktjänst", - "text_sv": "En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.", - "timestamp": "2018-08-16T22:13:00Z", - "_type": "svwiki" - } -} diff --git a/example/wiki_doc_tawiki_1.json b/example/wiki_doc_tawiki_1.json deleted file mode 100644 index 1b7e1aa..0000000 --- a/example/wiki_doc_tawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "tawiki_1", - "fields": { - "title_ta": "தேடுபொறி", - "text_ta": "தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். 
இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.", - "timestamp": "2017-12-24T10:30:00Z", - "_type": "tawiki" - } -} diff --git a/example/wiki_doc_tewiki_1.json b/example/wiki_doc_tewiki_1.json deleted file mode 100644 index 2cb70b5..0000000 --- a/example/wiki_doc_tewiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "tewiki_1", - "fields": { - "title_te": "వెబ్ శోధనా యంత్రం", - "text_te": "వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.", - "timestamp": "2017-06-19T11:22:00Z", - "_type": "tewiki" - } -} diff --git a/example/wiki_doc_thwiki_1.json b/example/wiki_doc_thwiki_1.json deleted file mode 100644 index 9379367..0000000 --- a/example/wiki_doc_thwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "thwiki_1", - "fields": { - "title_th": "เสิร์ชเอนจิน", - "text_th": "เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป", - "timestamp": "2016-06-18T11:06:00Z", - "_type": "thwiki" - } -} diff --git a/example/wiki_doc_trwiki_1.json b/example/wiki_doc_trwiki_1.json deleted file mode 100644 index 14dace8..0000000 --- a/example/wiki_doc_trwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "trwiki_1", - "fields": { - "title_tr": "Arama motoru", - "text_tr": "Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.", - "timestamp": "2018-03-13T17:37:00Z", - "_type": "trwiki" - } -} diff --git a/example/wiki_doc_zhwiki_1.json b/example/wiki_doc_zhwiki_1.json deleted file mode 100644 index 98f1376..0000000 --- a/example/wiki_doc_zhwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "zhwiki_1", - "fields": { - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" - } -} diff --git a/examples/example_bulk_delete.txt b/examples/example_bulk_delete.txt new file mode 100644 index 0000000..3bb459b --- /dev/null +++ b/examples/example_bulk_delete.txt @@ -0,0 +1,11 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 diff --git a/examples/example_bulk_index.json b/examples/example_bulk_index.json new file mode 100644 index 0000000..fab37a7 --- /dev/null +++ b/examples/example_bulk_index.json @@ -0,0 +1,11 @@ +{"id": "1","fields": {"title": "Blast", "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "2","fields": {"title": "Bleve", "text": "Bleve is a modern text indexing library for go.", "timestamp": "2019-10-30T16:13:00Z", "_type": "example"}} +{"id": "3","fields": {"title": "Riot", "text": "Riot is Go Open Source, Distributed, Simple and efficient full text search engine.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "4","fields": {"title": "Bayard", "text": "Bayard is a full text search and indexing server, written in Rust, built on top of Tantivy.", "timestamp": "2019-12-19T10:41:00Z", "_type": "example"}} +{"id": "5","fields": {"title": "Toshi", "text": "Toshi is meant to be a full-text search engine similar to Elasticsearch. 
Toshi strives to be to Elasticsearch what Tantivy is to Lucene.", "timestamp": "2019-12-02T04:00:00Z", "_type": "example"}} +{"id": "6","fields": {"title": "Tantivy", "text": "Tantivy is a full-text search engine library inspired by Apache Lucene and written in Rust.", "timestamp": "2019-12-19T10:07:00Z", "_type": "example"}} +{"id": "7","fields": {"title": "Sonic", "text": "Sonic is a fast, lightweight and schema-less search backend.", "timestamp": "2019-12-10T23:13:00Z", "_type": "example"}} +{"id": "8","fields": {"title": "Apache Solr", "text": "Solr is highly reliable, scalable and fault tolerant, providing distributed indexing, replication and load-balanced querying, automated failover and recovery, centralized configuration and more.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "9","fields": {"title": "Elasticsearch", "text": "Elasticsearch is a distributed, open source search and analytics engine for all types of data, including textual, numerical, geospatial, structured, and unstructured.", "timestamp": "2019-12-19T08:19:00Z", "_type": "example"}} +{"id": "10","fields": {"title": "Lucene", "text": "Apache Lucene is a high-performance, full-featured text search engine library written entirely in Java.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "11","fields": {"title": "Whoosh", "text": "Whoosh is a fast, pure Python search engine library.", "timestamp": "2019-10-08T05:30:26Z", "_type": "example"}} diff --git a/examples/example_doc_1.json b/examples/example_doc_1.json new file mode 100644 index 0000000..09f6cad --- /dev/null +++ b/examples/example_doc_1.json @@ -0,0 +1,8 @@ +{ + "fields": { + "title": "Blast", + "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", + "timestamp": "2019-12-16T07:12:00Z", + "_type": "example" + } +} diff --git a/examples/example_mapping.json b/examples/example_mapping.json new file mode 100644 index 0000000..118348c --- /dev/null +++ 
b/examples/example_mapping.json @@ -0,0 +1,103 @@ +{ + "types": { + "example": { + "enabled": true, + "dynamic": true, + "properties": { + "title": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "text": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "url": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + }, + "timestamp": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "datetime", + "store": true, + "index": true, + "include_in_all": true + } + ], + "default_analyzer": "" + }, + "_type": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + } + }, + "default_analyzer": "en" + } + }, + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } +} diff --git a/example/wiki_search_request.json b/examples/example_search_request.json similarity index 100% rename from example/wiki_search_request.json rename to examples/example_search_request.json diff --git 
a/example/wiki_search_request_prefix.json b/examples/example_search_request_prefix.json similarity index 100% rename from example/wiki_search_request_prefix.json rename to examples/example_search_request_prefix.json diff --git a/example/wiki_search_request_simple.json b/examples/example_search_request_simple.json similarity index 100% rename from example/wiki_search_request_simple.json rename to examples/example_search_request_simple.json diff --git a/examples/geo_example_bulk_index.json b/examples/geo_example_bulk_index.json new file mode 100644 index 0000000..fbcbad7 --- /dev/null +++ b/examples/geo_example_bulk_index.json @@ -0,0 +1,6 @@ +{"id":"1","fields":{"name":"Brewpub-on-the-Green","city":"Fremont","state":"California","code":"","country":"United States","phone":"","website":"","updated":"2010-07-22 20:00:20","description":"","address":[],"geo":{"accuracy":"APPROXIMATE","lat":37.5483,"lon":-121.989},"_type":"geo_example"}} +{"id":"2","fields":{"name":"Capital City Brewing Company","city":"Washington","state":"District of Columbia","code":"20005","country":"United States","phone":"202.628.2222","website":"http://www.capcitybrew.com","updated":"2010-07-22 20:00:20","description":"Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. 
Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.","address":["1100 New York Ave, NW"],"geo":{"accuracy":"ROOFTOP","lat":38.8999,"lon":-77.0272},"_type":"geo_example"}} +{"id":"3","fields":{"name":"Firehouse Grill & Brewery","city":"Sunnyvale","state":"California","code":"94086","country":"United States","phone":"1-408-773-9500","website":"","updated":"2010-07-22 20:00:20","description":"","address":["111 South Murphy Avenue"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":37.3775,"lon":-122.03},"_type":"geo_example"}} +{"id":"4","fields":{"name":"Hook & Ladder Brewing Company","city":"Silver Spring","state":"Maryland","code":"20910","country":"United States","phone":"301.565.4522","website":"http://www.hookandladderbeer.com","updated":"2010-07-22 20:00:20","description":"At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. 
Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.","address":["8113 Fenton St."],"geo":{"accuracy":"ROOFTOP","lat":38.9911,"lon":-77.0237},"_type":"geo_example"}} +{"id":"5","fields":{"name":"Jack's Brewing","city":"Fremont","state":"California","code":"94538","country":"United States","phone":"1-510-796-2036","website":"","updated":"2010-07-22 20:00:20","description":"","address":["39176 Argonaut Way"],"geo":{"accuracy":"ROOFTOP","lat":37.5441,"lon":-121.988},"_type":"geo_example"}} +{"id":"6","fields":{"name":"Sweet Water Tavern and Brewery","city":"Sterling","state":"Virginia","code":"20121","country":"United States","phone":"(703) 449-1108","website":"http://www.greatamericanrestaurants.com/sweetMainSter/index.htm","updated":"2010-07-22 20:00:20","description":"","address":["45980 Waterview Plaza"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":39.0324,"lon":-77.4097},"_type":"geo_example"}} diff --git a/example/geo_doc_1.json b/examples/geo_example_doc_1.json similarity index 89% rename from example/geo_doc_1.json rename to examples/geo_example_doc_1.json index 9cbc825..c359461 100644 --- a/example/geo_doc_1.json +++ b/examples/geo_example_doc_1.json @@ -1,5 +1,4 @@ { - "id": "1", "fields": { "name": "Brewpub-on-the-Green", "city": "Fremont", @@ -8,7 +7,6 @@ "country": "United States", "phone": "", "website": "", - "type": "brewery", "updated": "2010-07-22 20:00:20", "description": "", "address": [], @@ -16,6 +14,7 @@ "accuracy": "APPROXIMATE", "lat": 37.5483, "lon": -121.989 - } + }, + "_type": "geo_example" } } diff --git a/example/geo_index_mapping.json b/examples/geo_example_mapping.json similarity index 60% rename from example/geo_index_mapping.json rename to examples/geo_example_mapping.json index f067367..ba7769e 100644 --- a/example/geo_index_mapping.json +++ b/examples/geo_example_mapping.json @@ -1,6 
+1,6 @@ { "types": { - "brewery": { + "geo_example": { "properties": { "name": { "fields": [ @@ -32,5 +32,23 @@ } } }, - "default_type": "brewery" + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } } diff --git a/example/geo_search_request.json b/examples/geo_example_search_request.json similarity index 100% rename from example/geo_search_request.json rename to examples/geo_example_search_request.json diff --git a/examples/multiple_type_example_bulk_index.json b/examples/multiple_type_example_bulk_index.json new file mode 100644 index 0000000..b8ab6ff --- /dev/null +++ b/examples/multiple_type_example_bulk_index.json @@ -0,0 +1,36 @@ +{"id":"ar_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"ar"}} +{"id":"bg_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. 
В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bg"}} +{"id":"ca_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"ca"}} +{"id":"cs_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. 
Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cs"}} +{"id":"da_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"da"}} +{"id":"de_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"de"}} +{"id":"el_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. 
Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"el"}} +{"id":"en_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"en"}} +{"id":"es_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. 
Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"es"}} +{"id":"fa_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fa"}} +{"id":"fi_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fi"}} +{"id":"fr_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). 
Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"fr"}} +{"id":"ga_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. 
Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"ga"}} +{"id":"gl_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"gl"}} +{"id":"gu_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. 
વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"gu"}} +{"id":"hi_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hi"}} +{"id":"hu_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"hu"}} +{"id":"hy_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են 
տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hy"}} +{"id":"id_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"id"}} +{"id":"it_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"it"}} +{"id":"ja_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"ja"}} +{"id":"kn_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"kn"}} +{"id":"ko_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"ko"}} +{"id":"ml_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"ml"}} +{"id":"nl_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nl"}} +{"id":"no_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"no"}} +{"id":"ps_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"ps"}} +{"id":"pt_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"pt"}} +{"id":"ro_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"ro"}} +{"id":"ru_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ru"}} +{"id":"sv_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"sv"}} +{"id":"ta_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"ta"}} +{"id":"te_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"te"}} +{"id":"th_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"th"}} +{"id":"tr_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"tr"}} +{"id":"zh_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zh"}} diff --git a/example/wiki_index_mapping.json b/examples/multiple_type_example_mapping.json similarity index 99% rename from example/wiki_index_mapping.json rename to examples/multiple_type_example_mapping.json index ac7c43b..36b6522 100644 --- a/example/wiki_index_mapping.json +++ b/examples/multiple_type_example_mapping.json @@ -1,6 +1,6 @@ { "types": { - "arwiki": { + "ar": { "enabled": true, "dynamic": true, "properties": { @@ -80,7 +80,7 @@ }, "default_analyzer": "ar" }, - "bgwiki": { + "bg": { "enabled": true, "dynamic": true, "properties": { @@ -160,7 +160,7 @@ }, "default_analyzer": "bg" }, - "cawiki": { + "ca": { "enabled": true, "dynamic": true, "properties": { @@ -240,7 +240,7 @@ }, "default_analyzer": "ca" }, - "cswiki": { + "cs": { "enabled": true, "dynamic": true, "properties": { @@ -320,7 +320,7 @@ }, "default_analyzer": "cs" }, - "dawiki": { + "da": { "enabled": true, "dynamic": true, "properties": { @@ -400,7 +400,7 @@ }, "default_analyzer": "da" }, - "dewiki": { + "de": { "enabled": true, "dynamic": true, "properties": { @@ -480,7 +480,7 @@ }, "default_analyzer": "de" }, - "elwiki": { + "el": { "enabled": true, "dynamic": true, "properties": { @@ -560,7 +560,7 @@ }, "default_analyzer": "el" }, - "enwiki": { + "en": { "enabled": true, "dynamic": true, "properties": { @@ -640,7 +640,7 @@ }, "default_analyzer": "en" }, - "eswiki": { + "es": { "enabled": true, "dynamic": true, "properties": { @@ -720,7 +720,7 @@ }, "default_analyzer": "es" }, - "fawiki": { + "fa": { "enabled": true, "dynamic": true, "properties": { @@ -800,7 +800,7 @@ }, "default_analyzer": "fa" }, - "fiwiki": { 
+ "fi": { "enabled": true, "dynamic": true, "properties": { @@ -880,7 +880,7 @@ }, "default_analyzer": "fi" }, - "frwiki": { + "fr": { "enabled": true, "dynamic": true, "properties": { @@ -960,7 +960,7 @@ }, "default_analyzer": "fr" }, - "gawiki": { + "ga": { "enabled": true, "dynamic": true, "properties": { @@ -1040,7 +1040,7 @@ }, "default_analyzer": "ga" }, - "glwiki": { + "gl": { "enabled": true, "dynamic": true, "properties": { @@ -1120,7 +1120,7 @@ }, "default_analyzer": "gl" }, - "guwiki": { + "gu": { "enabled": true, "dynamic": true, "properties": { @@ -1200,7 +1200,7 @@ }, "default_analyzer": "in" }, - "hiwiki": { + "hi": { "enabled": true, "dynamic": true, "properties": { @@ -1280,7 +1280,7 @@ }, "default_analyzer": "hi" }, - "huwiki": { + "hu": { "enabled": true, "dynamic": true, "properties": { @@ -1360,7 +1360,7 @@ }, "default_analyzer": "hu" }, - "hywiki": { + "hy": { "enabled": true, "dynamic": true, "properties": { @@ -1440,7 +1440,7 @@ }, "default_analyzer": "hy" }, - "idwiki": { + "id": { "enabled": true, "dynamic": true, "properties": { @@ -1520,7 +1520,7 @@ }, "default_analyzer": "id" }, - "itwiki": { + "it": { "enabled": true, "dynamic": true, "properties": { @@ -1600,7 +1600,7 @@ }, "default_analyzer": "it" }, - "jawiki": { + "ja": { "enabled": true, "dynamic": true, "properties": { @@ -1680,7 +1680,7 @@ }, "default_analyzer": "ja" }, - "knwiki": { + "kn": { "enabled": true, "dynamic": true, "properties": { @@ -1760,7 +1760,7 @@ }, "default_analyzer": "in" }, - "kowiki": { + "ko": { "enabled": true, "dynamic": true, "properties": { @@ -1840,7 +1840,7 @@ }, "default_analyzer": "cjk" }, - "mlwiki": { + "ml": { "enabled": true, "dynamic": true, "properties": { @@ -1920,7 +1920,7 @@ }, "default_analyzer": "in" }, - "nlwiki": { + "nl": { "enabled": true, "dynamic": true, "properties": { @@ -2000,7 +2000,7 @@ }, "default_analyzer": "nl" }, - "nowiki": { + "no": { "enabled": true, "dynamic": true, "properties": { @@ -2080,7 +2080,7 @@ }, 
"default_analyzer": "no" }, - "pswiki": { + "ps": { "enabled": true, "dynamic": true, "properties": { @@ -2160,7 +2160,7 @@ }, "default_analyzer": "ckb" }, - "ptwiki": { + "pt": { "enabled": true, "dynamic": true, "properties": { @@ -2240,7 +2240,7 @@ }, "default_analyzer": "pt" }, - "rowiki": { + "ro": { "enabled": true, "dynamic": true, "properties": { @@ -2320,7 +2320,7 @@ }, "default_analyzer": "ro" }, - "ruwiki": { + "ru": { "enabled": true, "dynamic": true, "properties": { @@ -2400,7 +2400,7 @@ }, "default_analyzer": "ru" }, - "svwiki": { + "sv": { "enabled": true, "dynamic": true, "properties": { @@ -2480,7 +2480,7 @@ }, "default_analyzer": "sv" }, - "tawiki": { + "ta": { "enabled": true, "dynamic": true, "properties": { @@ -2560,7 +2560,7 @@ }, "default_analyzer": "in" }, - "tewiki": { + "te": { "enabled": true, "dynamic": true, "properties": { @@ -2640,7 +2640,7 @@ }, "default_analyzer": "in" }, - "thwiki": { + "th": { "enabled": true, "dynamic": true, "properties": { @@ -2720,7 +2720,7 @@ }, "default_analyzer": "th" }, - "trwiki": { + "tr": { "enabled": true, "dynamic": true, "properties": { @@ -2800,7 +2800,7 @@ }, "default_analyzer": "tr" }, - "zhwiki": { + "zh": { "enabled": true, "dynamic": true, "properties": { diff --git a/go.mod b/go.mod index c50a8ae..a218f2c 100644 --- a/go.mod +++ b/go.mod @@ -1,49 +1,51 @@ module github.com/mosuka/blast -go 1.13 +go 1.14 require ( - github.com/RoaringBitmap/roaring v0.4.21 // indirect - github.com/blevesearch/bleve v0.8.1 + github.com/RoaringBitmap/roaring v0.4.17 // indirect + github.com/bbva/raft-badger v1.0.0 + github.com/blevesearch/bleve v0.8.0 github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 // indirect - github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect - github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect - github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 // indirect + github.com/blevesearch/cld2 
v0.0.0-20200327141045-8b5f551d37f5 // indirect + github.com/blevesearch/go-porterstemmer v1.0.2 // indirect + github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect + github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd // indirect + github.com/couchbase/ghistogram v0.1.0 // indirect github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 // indirect - github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect + github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect + github.com/dgraph-io/badger/v2 v2.0.0 + github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/etcd-io/bbolt v1.3.3 // indirect github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.3.0 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.3.1 - github.com/gorilla/mux v1.7.3 - github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 + github.com/golang/protobuf v1.3.5 + github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.11.1 - github.com/hashicorp/raft v1.1.1 - github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/ikawaha/kagome.ipadic v1.1.0 // indirect - github.com/imdario/mergo v0.3.7 + github.com/grpc-ecosystem/grpc-gateway v1.14.3 + github.com/hashicorp/raft v1.1.2 + github.com/ikawaha/kagome.ipadic v1.1.2 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 - github.com/mosuka/bbadger v0.1.0 + 
github.com/mash/go-accesslog v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/common v0.9.1 github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/stretchr/objx v0.2.0 + github.com/spf13/cobra v0.0.7 + github.com/spf13/viper v1.4.0 + github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect - github.com/tebeka/snowball v0.3.0 // indirect + github.com/tebeka/snowball v0.4.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect - github.com/urfave/cli v1.22.1 go.etcd.io/bbolt v1.3.3 // indirect - go.uber.org/zap v1.10.0 - google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 - google.golang.org/grpc v1.23.1 + go.uber.org/zap v1.14.1 + google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c + google.golang.org/grpc v1.28.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/yaml.v2 v2.2.2 ) diff --git a/go.sum b/go.sum index 0379a4e..974344e 100644 --- a/go.sum +++ b/go.sum @@ -1,73 +1,95 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.4.1 
h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= -github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1/go.mod h1:o80NPAib/LOl8Eysqppjj7kkGkqz++eqzYGlvROpDcQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/bbva/raft-badger v1.0.0 h1:N8C2rELUxfrVZhtyCBja/ymhv8cvPhVB+3ab2ob9mkk= +github.com/bbva/raft-badger v1.0.0/go.mod h1:yQjfHBXGV55aXOoEAuNGNlIIGvGNbSG85gOLhfo0pDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blevesearch/bleve v0.7.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= -github.com/blevesearch/bleve v0.8.1 h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU= -github.com/blevesearch/bleve v0.8.1/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= +github.com/blevesearch/bleve v0.8.0 h1:DCoCrxscCXrlzVWK92k7Vq4d28lTAFuigVmcgIX0VCo= +github.com/blevesearch/bleve v0.8.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 h1:ZPImXwzC+ICkkSYlPP9mMVgQlZH24+56rIEUjVxfFnY= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= +github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 h1:/4ikScMMYMqsRFWJjCyzd3CNWB0lxvqDkqa5nEv6NMc= +github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I= github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA= github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg= github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8= 
-github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d h1:iPCfLXcTYDotqO1atEOQyoRDwlGaZVuMI4wSaKQlI2I= -github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= +github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd h1:YVyOs9yxpxqcB93Ul/UbdGTh26TrTafZrLdCqbJ4IXs= +github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 
h1:T7Qykid5GIoDEVTZL0NcbimcT2qmzjo5mNGhe8i0/5M= -github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= +github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpmYDxGobTY/jTuF5zHLw0ID75yzuM= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= -github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd h1:zeuJhcG3f8eePshH3KxkNE+Xtl53pVln9MOUPMyr/1w= -github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd/go.mod h1:xbc8Ff/oG7h2ejd7AlwOpfd+6QZntc92ygpAOfGwcKY= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= github.com/cznic/b 
v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible h1:xeEWHqaQFcm44dJsZYN6JIiLCHG+DciygDfGvIfbkv8= -github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgraph-io/badger/v2 v2.0.0 h1:Cr05o2TUd2IcLbEY0aGd8mbjm1YyQpy+dswo3BcDXrE= +github.com/dgraph-io/badger/v2 v2.0.0/go.mod h1:YoRSIp1LmAJ7zH7tZwRvjNMUYLxB4wl3ebYkaIruZ04= +github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg= +github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 
h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= @@ -83,43 +105,48 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 
h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= 
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= 
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.11.1 h1:/dBYI+n4xIL+Y9SKXQrjlKTmJJDwCSlNLRwZ5nBhIek= -github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= +github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -132,24 +159,24 @@ github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCS github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/raft v1.1.0/go.mod 
h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= +github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 h1:bLsrEmB2NUwkHH18FOJBIa04wOV2RQalJrcafTYu6Lg= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477/go.mod h1:aUF6HQr8+t3FC/ZHAC+pZreUBhTaxumuu3L+d37uRxk= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ikawaha/kagome.ipadic v1.1.0 h1:9hzwhcklEL4Cmp+lM9HQfmDg2nhB43Fe1n9UUY6mifY= -github.com/ikawaha/kagome.ipadic v1.1.0/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/ikawaha/kagome.ipadic v1.1.2 h1:pFxZ1PpMpc6ZoBK712YN5cVK0u/ju2DZ+gRIOriJFFs= +github.com/ikawaha/kagome.ipadic v1.1.2/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -163,24 +190,26 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217/go.mod h1:5JLTyA+23fYz/BfD5Hn736mGEZopzWtEx1pdNfnTp8k= +github.com/mash/go-accesslog v1.1.0 h1:y22583qP3s+SePBs6mv8ZTz5D1UffPrSg+WFEW2Rf/c= +github.com/mash/go-accesslog v1.1.0/go.mod h1:DAbGQzio0KX16krP/3uouoTPxGbzcPjFAb948zazOgg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 
h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mosuka/bbadger v0.1.0 h1:yc0UbkZFREZjzcNqXJp0/DPOTWld9Vq/S/MTHOb4x14= -github.com/mosuka/bbadger v0.1.0/go.mod h1:Er3F7xRxkBmVSIhqjA9CSk7ovFqfdcZDdzFBWJqfwog= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -189,6 +218,7 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -198,25 +228,38 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= @@ -229,68 +272,99 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 
h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify 
v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tebeka/snowball v0.3.0 h1:/vP76OjIhZrXtcmBmQgQ986B/WM95MB4tdLEuWdDgZk= -github.com/tebeka/snowball v0.3.0/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= +github.com/tebeka/snowball v0.4.1 h1:erVaJlHNQD465+S9dBGnl/AdDiGU0N8FTRo5QexNgCs= +github.com/tebeka/snowball v0.4.1/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 h1:FLimlAjzuhq8loeLX7lLhKKeUgpA/4slynlNVB/Qaks= github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -298,35 +372,53 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 
h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 h1:Phvl0+G5t5k/EUFUi0wPdUUeTL2HydMQUXHnunWgSb0= -google.golang.org/genproto v0.0.0-20190916214212-f660b8655731/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 
h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= @@ -336,7 +428,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/hashutils/hashutils.go b/hashutils/hashutils.go deleted file mode 100644 index 2ac1911..0000000 --- a/hashutils/hashutils.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package hashutils - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" -) - -func Hash(v interface{}) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - hb := sha256.Sum256(b) - - return hex.EncodeToString(hb[:]), nil -} diff --git a/http/metric.go b/http/metric.go deleted file mode 100644 index 09afbf5..0000000 --- a/http/metric.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package http - -import ( - "net/http" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "http" - subsystem = "server" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handling_seconds", - Help: "The invocation duration in seconds.", - }, - []string{ - "request_uri", - }, - ) - - RequestsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handled_total", - Help: "The number of requests.", - }, - []string{ - "request_uri", - "http_method", - "http_status", - }, - ) - - RequestsBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_received_bytes", - Help: "A summary of the invocation requests bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) - - ResponsesBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "responses_sent_bytes", - Help: "A summary of the invocation responses bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(RequestsTotal) - prometheus.MustRegister(RequestsBytesTotal) - prometheus.MustRegister(ResponsesBytesTotal) -} - -func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request) { - DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) - - RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method, "http_status": strconv.Itoa(status)}).Inc() - - RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(float64(request.ContentLength)) - - contentLength, err := 
strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) - if err == nil { - ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(contentLength) - } -} diff --git a/http/response.go b/http/response.go deleted file mode 100644 index d51fdc2..0000000 --- a/http/response.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package http - -import ( - "encoding/json" - "net/http" - "strconv" - - "go.uber.org/zap" -) - -func NewJSONMessage(msgMap map[string]interface{}) ([]byte, error) { - content, err := json.MarshalIndent(msgMap, "", " ") - if err != nil { - return nil, err - } - - return content, nil -} - -func WriteResponse(w http.ResponseWriter, content []byte, status int, logger *zap.Logger) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", strconv.FormatInt(int64(len(content)), 10)) - w.WriteHeader(status) - _, err := w.Write(content) - if err != nil { - logger.Error(err.Error()) - } - - return -} diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go deleted file mode 100644 index 38ace62..0000000 --- a/indexer/grpc_client.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/index" - "google.golang.org/grpc" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client index.IndexClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) 
- if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: index.NewIndexClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *index.NodeHealthCheckRequest, opts ...grpc.CallOption) (*index.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) -} - -func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.NodeInfoResponse, error) { - return c.client.NodeInfo(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterJoin(req *index.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterJoin(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterLeave(req *index.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterLeave(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.ClusterInfoResponse, error) { - return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { - return c.client.ClusterWatch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Get(req *index.GetRequest, opts ...grpc.CallOption) (*index.GetResponse, error) { - return c.client.Get(c.ctx, req, opts...) -} - -func (c *GRPCClient) Index(req *index.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Index(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *index.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Delete(c.ctx, req, opts...) 
-} - -func (c *GRPCClient) BulkIndex(req *index.BulkIndexRequest, opts ...grpc.CallOption) (*index.BulkIndexResponse, error) { - return c.client.BulkIndex(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkDelete(req *index.BulkDeleteRequest, opts ...grpc.CallOption) (*index.BulkDeleteResponse, error) { - return c.client.BulkDelete(c.ctx, req, opts...) -} - -func (c *GRPCClient) Search(req *index.SearchRequest, opts ...grpc.CallOption) (*index.SearchResponse, error) { - return c.client.Search(c.ctx, req, opts...) -} - -func (c *GRPCClient) GetIndexConfig(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexConfigResponse, error) { - return c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) GetIndexStats(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexStatsResponse, error) { - return c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Snapshot(c.ctx, &empty.Empty{}) -} diff --git a/indexer/grpc_gateway.go b/indexer/grpc_gateway.go deleted file mode 100644 index 3a1fafa..0000000 --- a/indexer/grpc_gateway.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *index.GetResponse: - value, err := protobuf.MarshalAny(v.(*index.GetResponse).Fields) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "fields": value, - }, - ) - case *index.SearchResponse: - value, err := protobuf.MarshalAny(v.(*index.SearchResponse).SearchResult) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "search_result": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". 
-func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - switch v.(type) { - case *index.SearchRequest: - m := map[string]interface{}{} - err := json.Unmarshal(data, &m) - if err != nil { - return err - } - searchRequestMap, ok := m["search_request"] - if !ok { - return errors.New("search_request does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - searchRequest := bleve.NewSearchRequest(nil) - err = json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - return err - } - v.(*index.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) - default: - return json.Unmarshal(data, v) - } -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.IndexRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - id, ok := tmpValue["id"].(string) - if ok { - v.(*index.IndexRequest).Id = id - } - - fields, ok := tmpValue["fields"] - if !ok { - return errors.New("value does not exist") - } - v.(*index.IndexRequest).Fields = &any.Any{} - return protobuf.UnmarshalAny(fields, v.(*index.IndexRequest).Fields) - case *index.SearchRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - var searchRequest *bleve.SearchRequest - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - 
v.(*index.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type JsonlMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonlMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". 
-func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.BulkIndexRequest: - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - v.(*index.BulkIndexRequest).Documents = docs - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonlMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type TextMarshaler struct{} - -// ContentType always Returns "application/json". -func (*TextMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads text stream from "r". 
-func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.BulkDeleteRequest: - ids := make([]string, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - //idBytes, err := reader.ReadBytes('\n') - idBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - break - } - } - - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - } - v.(*index.BulkDeleteRequest).Ids = ids - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *TextMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), - runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := index.RegisterIndexHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != 
nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/indexer/grpc_server.go b/indexer/grpc_server.go deleted file mode 100644 index 8dd8c78..0000000 --- a/indexer/grpc_server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service index.IndexServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service index.IndexServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - //grpc_recovery.UnaryServerInterceptor(), - )), - ) - - index.RegisterIndexServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := 
s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go deleted file mode 100644 index 63b8d78..0000000 --- a/indexer/grpc_service.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - managerGrpcAddress string - shardId string - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers *index.Cluster - peerClients map[string]*GRPCClient - cluster *index.Cluster - clusterChans map[chan index.ClusterWatchResponse]struct{} - clusterMutex sync.RWMutex - - 
managers *management.Cluster - managerClients map[string]*manager.GRPCClient - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} -} - -func NewGRPCService(managerGrpcAddress string, shardId string, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - managerGrpcAddress: managerGrpcAddress, - shardId: shardId, - raftServer: raftServer, - logger: logger, - - peers: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, - peerClients: make(map[string]*GRPCClient, 0), - cluster: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, - clusterChans: make(map[chan index.ClusterWatchResponse]struct{}), - - managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - managerClients: make(map[string]*manager.GRPCClient, 0), - }, nil -} - -func (s *GRPCService) Start() error { - if s.managerGrpcAddress != "" { - var err error - s.managers, err = s.getManagerCluster(s.managerGrpcAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - for id, node := range s.managers.Nodes { - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) - } - s.managerClients[node.Id] = client - } - - s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - } - - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - s.stopUpdateCluster() - - if s.managerGrpcAddress != "" { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - } - - return nil -} - -func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { - var client *manager.GRPCClient - - for id, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("assertion 
failed", zap.String("id", id)) - continue - } - - if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { - var ok bool - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return res.Cluster, nil -} - -func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - // get client for manager from the list - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // create stream for watching cluster changes - req := &empty.Empty{} - stream, err := client.ClusterWatch(req) - if err != nil { - 
s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Info("cluster has changed", zap.Any("resp", resp)) - switch resp.Event { - case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: - // add to cluster nodes - s.managers.Nodes[resp.Node.Id] = resp.Node - - // check node state - switch resp.Node.State { - case management.Node_UNKNOWN, management.Node_SHUTDOWN: - // close client - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - } - default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER - if resp.Node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := s.managerClients[resp.Node.Id]; !exist { - // create new client - s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } else { - if client.GetAddress() != resp.Node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - 
err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - - // re-create new client - s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } - } - } - case management.ClusterWatchResponse_LEAVE: - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.managerClients, resp.Node.Id) - } - - if _, exist := s.managers.Nodes[resp.Node.Id]; exist { - delete(s.managers.Nodes, resp.Node.Id) - } - default: - s.logger.Debug("unknown event", zap.Any("event", resp.Event)) - continue - } - } - } -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - for id, node := range s.cluster.Nodes { - switch node.State { - case index.Node_LEADER: - if 
client, exist := s.peerClients[id]; exist { - return client, nil - } - } - } - - err := errors.New("there is no leader") - s.logger.Error(err.Error()) - return nil, err -} - -func (s *GRPCService) cloneCluster(cluster *index.Cluster) (*index.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *index.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - savedCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - for { - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - s.cluster, err = s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - snapshotCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - for id, node := range snapshotCluster.Nodes { - if id != s.NodeID() { - s.peers.Nodes[id] = node - } - } - - // open clients for peer nodes - for id, node := range s.peers.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - client, exist := s.peerClients[id] - if exist { - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - delete(s.peerClients, id) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id)) - } - newClient, err := 
NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } else { - s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } - - // close clients for non-existent peer nodes - for id, client := range s.peerClients { - if _, exist := s.peers.Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.peerClients, id) - } - } - - // check joined and updated nodes - for id, node := range snapshotCluster.Nodes { - nodeSnapshot, exist := savedCluster.Nodes[id] - if exist { - // node exists in the cluster - n1, err := json.Marshal(node) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) - continue - } - n2, err := json.Marshal(nodeSnapshot) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) - continue - } - if !cmp.Equal(n1, n2) { - // node updated - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_UPDATE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } else { - // node joined - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_JOIN, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } 
- - // check left nodes - for id, node := range savedCluster.Nodes { - if _, exist := snapshotCluster.Nodes[id]; !exist { - // node left - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_LEAVE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - // set cluster state to manager - if !cmp.Equal(savedCluster, snapshotCluster) && s.managerGrpcAddress != "" && s.raftServer.IsLeader() { - snapshotClusterBytes, err := json.Marshal(snapshotCluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - var snapshotClusterMap map[string]interface{} - err = json.Unmarshal(snapshotClusterBytes, &snapshotClusterMap) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(snapshotClusterMap, valueAny) - if err != nil { - s.logger.Error(err.Error()) - continue - } - req := &management.SetRequest{ - Key: fmt.Sprintf("cluster/shards/%s", s.shardId), - Value: valueAny, - } - _, err = client.Set(req) - if err != nil { - s.logger.Error(err.Error()) - continue - } - } - - savedCluster = snapshotCluster - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeHealthCheck(ctx 
context.Context, req *index.NodeHealthCheckRequest) (*index.NodeHealthCheckResponse, error) { - resp := &index.NodeHealthCheckResponse{} - - switch req.Probe { - case index.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case index.NodeHealthCheckRequest_HEALTHINESS: - resp.State = index.NodeHealthCheckResponse_HEALTHY - case index.NodeHealthCheckRequest_LIVENESS: - resp.State = index.NodeHealthCheckResponse_ALIVE - case index.NodeHealthCheckRequest_READINESS: - resp.State = index.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() *index.Node { - node := s.raftServer.node - - switch s.raftServer.State() { - case raft.Follower: - node.State = index.Node_FOLLOWER - case raft.Candidate: - node.State = index.Node_CANDIDATE - case raft.Leader: - node.State = index.Node_LEADER - case raft.Shutdown: - node.State = index.Node_SHUTDOWN - default: - node.State = index.Node_UNKNOWN - } - - return node -} - -func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { - if _, exist := s.peerClients[id]; !exist { - err := errors.New("node does not exist in peers") - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - - req := &empty.Empty{} - resp, err := s.peerClients[id].NodeInfo(req) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return &index.Node{ - BindAddress: "", - State: index.Node_SHUTDOWN, - Metadata: &index.Metadata{ - GrpcAddress: "", - HttpAddress: "", - }, - }, nil - } - - return resp.Node, nil -} - -func (s *GRPCService) getNode(id string) (*index.Node, error) { - if id == "" || id == s.NodeID() { - return s.getSelfNode(), nil - } else { - return s.getPeerNode(id) - } -} - -func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) 
(*index.NodeInfoResponse, error) { - resp := &index.NodeInfoResponse{} - - node, err := s.getNode(s.NodeID()) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return &index.NodeInfoResponse{ - Node: node, - }, nil -} - -func (s *GRPCService) setNode(node *index.Node) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(node) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &index.ClusterJoinRequest{ - Node: node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterJoin(ctx context.Context, req *index.ClusterJoinRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.setNode(req.Node) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &index.ClusterLeaveRequest{ - Id: id, - } - - _, err = client.ClusterLeave(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterLeave(ctx context.Context, req *index.ClusterLeaveRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (*index.Cluster, error) { - cluster, err := 
s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update latest node state - for id := range cluster.Nodes { - node, err := s.getNode(id) - if err != nil { - s.logger.Debug(err.Error()) - continue - } - cluster.Nodes[id] = node - } - - return cluster, nil -} - -func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*index.ClusterInfoResponse, error) { - resp := &index.ClusterInfoResponse{} - - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = cluster - - return resp, nil -} - -func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterWatchServer) error { - chans := make(chan index.ClusterWatchResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Get(ctx context.Context, req *index.GetRequest) (*index.GetResponse, error) { - resp := &index.GetResponse{} - - fields, err := s.raftServer.Get(req.Id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fields, fieldsAny) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Fields = fieldsAny - - return resp, nil -} - -func (s *GRPCService) Index(ctx 
context.Context, req *index.IndexRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - // index - var err error - if s.raftServer.IsLeader() { - err = s.raftServer.Index(&index.Document{Id: req.Id, Fields: req.Fields}) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Index(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *index.DeleteRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - // delete - var err error - if s.raftServer.IsLeader() { - err = s.raftServer.Delete(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Delete(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkIndex(ctx context.Context, req *index.BulkIndexRequest) (*index.BulkIndexResponse, error) { - resp := &index.BulkIndexResponse{} - - if s.raftServer.IsLeader() { - count, err := s.raftServer.BulkIndex(req.Documents) - if err != nil { - s.logger.Error(err.Error()) - resp.Count = -1 - return resp, status.Error(codes.Internal, err.Error()) - } - resp.Count = int32(count) - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.BulkIndex(req) - if err != 
nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkDelete(ctx context.Context, req *index.BulkDeleteRequest) (*index.BulkDeleteResponse, error) { - resp := &index.BulkDeleteResponse{} - - if s.raftServer.IsLeader() { - count, err := s.raftServer.BulkDelete(req.Ids) - if err != nil { - s.logger.Error(err.Error()) - resp.Count = -1 - return resp, status.Error(codes.Internal, err.Error()) - } - resp.Count = int32(count) - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err := client.BulkDelete(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { - resp := &index.SearchResponse{} - - searchRequest, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.SearchResult = searchResultAny - - return resp, nil -} - -func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { - resp := &index.GetIndexConfigResponse{ - IndexConfig: &index.IndexConfig{}, - } - - indexConfig, err := s.raftServer.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.Internal, err.Error()) - } - - if indexMapping, ok := indexConfig["index_mapping"]; ok { - indexMappingAny := &any.Any{} - err = protobuf.UnmarshalAny(indexMapping.(*mapping.IndexMappingImpl), indexMappingAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp.IndexConfig.IndexMapping = indexMappingAny - } - - if indexType, ok := indexConfig["index_type"]; ok { - resp.IndexConfig.IndexType = indexType.(string) - } - - if indexStorageType, ok := indexConfig["index_storage_type"]; ok { - resp.IndexConfig.IndexStorageType = indexStorageType.(string) - } - - return resp, nil -} - -func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*index.GetIndexStatsResponse, error) { - resp := &index.GetIndexStatsResponse{} - - indexStats, err := s.raftServer.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - indexStatsAny := &any.Any{} - err = protobuf.UnmarshalAny(indexStats, indexStatsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.IndexStats = indexStatsAny - - return resp, nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/indexer/http_handler.go b/indexer/http_handler.go deleted file mode 100644 index 6a7353f..0000000 --- a/indexer/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/indexer/http_server.go b/indexer/http_server.go deleted file mode 100644 index 238da55..0000000 --- a/indexer/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/indexer/index.go b/indexer/index.go deleted file mode 100644 index 2c8a031..0000000 --- a/indexer/index.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "os" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/document" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type Index struct { - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - - index bleve.Index -} - -func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*Index, error) { - //bleve.SetLog(logger) - - var index bleve.Index - _, err := os.Stat(dir) - if os.IsNotExist(err) { - // create new index - index, err = bleve.NewUsing(dir, indexMapping, indexType, indexStorageType, nil) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } else { - // open existing index - index, err = bleve.OpenUsing(dir, map[string]interface{}{ - "create_if_missing": false, - "error_if_exists": false, - }) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } - - return &Index{ - index: index, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - }, nil -} - -func (i *Index) Close() error { - err := i.index.Close() - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) Get(id string) (map[string]interface{}, error) { - doc, err := 
i.index.Document(id) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - if doc == nil { - return nil, errors.ErrNotFound - } - - fields := make(map[string]interface{}, 0) - for _, f := range doc.Fields { - var v interface{} - switch field := f.(type) { - case *document.TextField: - v = string(field.Value()) - case *document.NumericField: - n, err := field.Number() - if err == nil { - v = n - } - case *document.DateTimeField: - d, err := field.DateTime() - if err == nil { - v = d.Format(time.RFC3339Nano) - } - } - existing, existed := fields[f.Name()] - if existed { - switch existing := existing.(type) { - case []interface{}: - fields[f.Name()] = append(existing, v) - case interface{}: - arr := make([]interface{}, 2) - arr[0] = existing - arr[1] = v - fields[f.Name()] = arr - } - } else { - fields[f.Name()] = v - } - } - - return fields, nil -} - -func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := i.index.Search(request) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (i *Index) Index(doc *index.Document) error { - _, err := i.BulkIndex([]*index.Document{doc}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) BulkIndex(docs []*index.Document) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, doc := range docs { - fieldsIntr, err := protobuf.MarshalAny(doc.Fields) - if err != nil { - i.logger.Error(err.Error(), zap.Any("doc", doc)) - continue - } - err = batch.Index(doc.Id, *fieldsIntr.(*map[string]interface{})) - if err != nil { - i.logger.Error(err.Error()) - continue - } - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Delete(id string) error { - _, err := i.BulkDelete([]string{id}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return 
nil -} - -func (i *Index) BulkDelete(ids []string) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, id := range ids { - batch.Delete(id) - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Config() (map[string]interface{}, error) { - return map[string]interface{}{ - "index_mapping": i.indexMapping, - "index_type": i.indexType, - "index_storage_type": i.indexStorageType, - }, nil -} - -func (i *Index) Stats() (map[string]interface{}, error) { - return i.index.StatsMap(), nil -} - -func (i *Index) SnapshotItems() <-chan *index.Document { - ch := make(chan *index.Document, 1024) - - go func() { - idx, _, err := i.index.Advanced() - if err != nil { - i.logger.Error(err.Error()) - return - } - - r, err := idx.Reader() - if err != nil { - i.logger.Error(err.Error()) - return - } - - docCount := 0 - - dr, err := r.DocIDReaderAll() - for { - if dr == nil { - i.logger.Error(err.Error()) - break - } - id, err := dr.Next() - if id == nil { - i.logger.Debug("finished to read all document ids") - break - } else if err != nil { - i.logger.Warn(err.Error()) - continue - } - - // get original document - fieldsBytes, err := i.index.GetInternal(id) - - // bytes -> map[string]interface{} - var fieldsMap map[string]interface{} - err = json.Unmarshal([]byte(fieldsBytes), &fieldsMap) - if err != nil { - i.logger.Error(err.Error()) - break - } - - // map[string]interface{} -> Any - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - i.logger.Error(err.Error()) - break - } - - doc := &index.Document{ - Id: string(id), - Fields: fieldsAny, - } - - ch <- doc - - docCount = docCount + 1 - } - - i.logger.Debug("finished to write all documents to channel") - ch <- nil - - i.logger.Info("finished to snapshot", zap.Int("count", docCount)) - - return - }() - - return ch -} diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go 
deleted file mode 100644 index da53222..0000000 --- a/indexer/raft_fsm.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - - cluster *index.Cluster - clusterMutex sync.RWMutex - - index *Index -} - -func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - f.logger.Info("initialize cluster") - f.cluster = &index.Cluster{Nodes: make(map[string]*index.Node, 0)} - - f.logger.Info("initialize index") - var err error - f.index, err = NewIndex(f.path, f.indexMapping, f.indexType, f.indexStorageType, f.logger) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) Stop() error { - 
f.logger.Info("close index") - err := f.index.Close() - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) GetNode(nodeId string) (*index.Node, error) { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - node, ok := f.cluster.Nodes[nodeId] - if !ok { - return nil, blasterrors.ErrNotFound - } - - return node, nil -} - -func (f *RaftFSM) SetNode(node *index.Node) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - f.cluster.Nodes[node.Id] = node - - return nil -} - -func (f *RaftFSM) DeleteNode(nodeId string) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - if _, ok := f.cluster.Nodes[nodeId]; !ok { - return blasterrors.ErrNotFound - } - - delete(f.cluster.Nodes, nodeId) - - return nil -} - -func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { - fields, err := f.index.Get(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - f.logger.Debug(err.Error(), zap.String("id", id)) - default: - f.logger.Error(err.Error(), zap.String("id", id)) - } - return nil, err - } - - return fields, nil -} - -func (f *RaftFSM) Index(doc *index.Document) error { - err := f.index.Index(doc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) BulkIndex(docs []*index.Document) (int, error) { - count, err := f.index.BulkIndex(docs) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) Delete(id string) error { - err := f.index.Delete(id) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) BulkDelete(ids []string) (int, error) { - count, err := f.index.BulkDelete(ids) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := f.index.Search(request) - if 
err != nil { - f.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (f *RaftFSM) GetIndexConfig() (map[string]interface{}, error) { - return f.index.Config() -} - -func (f *RaftFSM) GetIndexStats() (map[string]interface{}, error) { - return f.index.Stats() -} - -type fsmResponse struct { - error error -} - -type fsmBulkIndexResponse struct { - count int - error error -} - -type fsmBulkDeleteResponse struct { - count int - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - proposal := &index.Proposal{} - err := proto.Unmarshal(l.Data, proposal) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - switch proposal.Event { - case index.Proposal_SET_NODE: - err = f.SetNode(proposal.Node) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_DELETE_NODE: - err = f.DeleteNode(proposal.Node.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_INDEX: - err := f.Index(proposal.Document) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_DELETE: - err := f.Delete(proposal.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_BULK_INDEX: - count, err := f.BulkIndex(proposal.Documents) - if err != nil { - f.logger.Error(err.Error()) - return &fsmBulkIndexResponse{count: count, error: err} - } - return &fsmBulkIndexResponse{count: count, error: nil} - case index.Proposal_BULK_DELETE: - count, err := f.BulkDelete(proposal.Ids) - if err != nil { - f.logger.Error(err.Error()) - return &fsmBulkDeleteResponse{count: count, error: err} - } - return &fsmBulkDeleteResponse{count: count, error: nil} - default: - err = errors.New("unsupported command") - 
f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - index: f.index, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - docCount := 0 - - buff := proto.NewBuffer(data) - for { - doc := &index.Document{} - err = buff.DecodeMessage(doc) - if err == io.ErrUnexpectedEOF { - break - } - if err != nil { - f.logger.Error(err.Error()) - continue - } - - err = f.index.Index(doc) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("restore", zap.Int("count", docCount)) - - return nil -} - -// --------------------- - -type RaftFSMSnapshot struct { - index *Index - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - ch := f.index.SnapshotItems() - - docCount := 0 - - for { - doc := <-ch - if doc == nil { - break - } - - docBytes, err := json.Marshal(doc) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - _, err = sink.Write(docBytes) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("persist", zap.Int("count", docCount)) - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/indexer/raft_server.go b/indexer/raft_server.go deleted file mode 100644 index 6c2c41c..0000000 --- a/indexer/raft_server.go +++ /dev/null @@ -1,688 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); 
-// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - _ "github.com/mosuka/blast/builtins" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - node *index.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - bootstrap bool - logger *zap.Logger - - transport *raft.NetworkTransport - raft *raft.Raft - fsm *RaftFSM -} - -func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.dataDir, "index") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.indexMapping, s.indexType, s.indexStorageType, s.logger) 
- if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("id", s.node.Id)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.node.Id) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - //if s.bootstrap { - // raftConfig.StartAsLeader = true - //} - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) - addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.dataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.raftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", 
s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - //case "badger": - // logStorePath := filepath.Join(s.dataDir, "raft", "log") - // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - // s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - default: - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - 
s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - Address: s.transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own node config", zap.Any("node", s.node)) - err = s.setNode(s.node) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = 
cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeAddress() string { - return string(s.transport.LocalAddr()) -} - -func (s *RaftServer) NodeID() string { - return s.node.Id -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { - nodeConfig, err := s.fsm.GetNode(nodeId) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", nodeId)) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNode(node *index.Node) error { - proposal := &index.Proposal{ - Event: index.Proposal_SET_NODE, - Node: node, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) deleteNode(nodeId string) error { - proposal := &index.Proposal{ - Event: index.Proposal_DELETE_NODE, - Node: &index.Node{ - Id: nodeId, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := 
s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (*index.Node, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - var node *index.Node - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - node, err = s.getNode(id) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(node *index.Node) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(node.Id) { - s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) - return nil - } - } - - if node.BindAddress == "" { - err = errors.New("missing bind address") - s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) - return err - } - - // add node to Raft cluster - s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) - f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) - return err - } - - // set node config - err = s.setNode(node) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return 
nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) - f := s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", string(server.ID))) - return err - } - } - } - - // delete node config - err = s.deleteNode(nodeId) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (*index.Cluster, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := &index.Cluster{Nodes: make(map[string]*index.Node, 0)} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) - continue - } - - cluster.Nodes[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) Get(id string) (map[string]interface{}, error) { - fields, err := s.fsm.GetDocument(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("id", id)) - default: - s.logger.Error(err.Error(), zap.String("id", id)) - } - return nil, err - } - - return fields, nil -} - -func (s *RaftServer) Index(doc 
*index.Document) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_INDEX, - Document: doc, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) Delete(id string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_DELETE, - Id: id, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) BulkIndex(docs []*index.Document) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_BULK_INDEX, - Documents: docs, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmBulkIndexResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return 
f.Response().(*fsmBulkIndexResponse).count, nil -} - -func (s *RaftServer) BulkDelete(ids []string) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_BULK_DELETE, - Ids: ids, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmBulkDeleteResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return f.Response().(*fsmBulkDeleteResponse).count, nil -} - -func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := s.fsm.Search(request) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { - indexConfig, err := s.fsm.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexConfig, nil -} - -func (s *RaftServer) GetIndexStats() (map[string]interface{}, error) { - indexStats, err := s.fsm.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexStats, nil -} diff --git a/indexer/server.go b/indexer/server.go deleted file mode 100644 index dbea38b..0000000 --- a/indexer/server.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "fmt" - - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/indexutils" - - "github.com/mosuka/blast/protobuf/management" - - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type Server struct { - managerGrpcAddress string - shardId string - peerGrpcAddress string - node *index.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - managerGrpcAddress: managerGrpcAddress, - shardId: shardId, - peerGrpcAddress: peerGrpcAddress, - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - grpcLogger: 
grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - // get peer from manager - if s.managerGrpcAddress != "" { - s.logger.Info("connect to manager", zap.String("manager_grpc_addr", s.managerGrpcAddress)) - - mc, err := manager.NewGRPCClient(s.managerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &management.GetRequest{ - Key: fmt.Sprintf("cluster/shards/%s", s.shardId), - } - res, err := mc.Get(req) - if err != nil && err != blasterrors.ErrNotFound { - s.logger.Fatal(err.Error()) - return - } - value, err := protobuf.MarshalAny(res.Value) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - if value != nil { - nodes := *value.(*map[string]interface{}) - nodesBytes, err := json.Marshal(nodes) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - var cluster *index.Cluster - err = json.Unmarshal(nodesBytes, &cluster) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - for id, node := range cluster.Nodes { - if id == s.node.Id { - s.logger.Debug("skip own node id", zap.String("id", id)) - continue - } - - s.logger.Info("peer node detected", zap.String("peer_grpc_addr", node.Metadata.GrpcAddress)) - s.peerGrpcAddress = node.Metadata.GrpcAddress - break - } - } - } - - //get index config from manager or peer - if s.managerGrpcAddress != "" { - mc, err := manager.NewGRPCClient(s.managerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - req := &management.GetRequest{ - Key: "/index_config", - } - resp, err := 
mc.Get(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - value, err := protobuf.MarshalAny(resp.Value) - if value != nil { - indexConfigMap := *value.(*map[string]interface{}) - indexMappingSrc, ok := indexConfigMap["index_mapping"].(map[string]interface{}) - if ok { - indexMappingBytes, err := json.Marshal(indexMappingSrc) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - s.indexMapping, err = indexutils.NewIndexMappingFromBytes(indexMappingBytes) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } - indexTypeSrc, ok := indexConfigMap["index_type"] - if ok { - s.indexType = indexTypeSrc.(string) - } - indexStorageTypeSrc, ok := indexConfigMap["index_storage_type"] - if ok { - s.indexStorageType = indexStorageTypeSrc.(string) - } - } - } else if s.peerGrpcAddress != "" { - pc, err := NewGRPCClient(s.peerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", pc.GetAddress())) - err = pc.Close() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - req := &empty.Empty{} - res, err := pc.GetIndexConfig(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - indexMapping, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) - s.indexMapping = indexMapping.(*mapping.IndexMappingImpl) - s.indexType = res.IndexConfig.IndexType - s.indexStorageType = res.IndexConfig.IndexStorageType - } - - // bootstrap node? 
- bootstrap := s.peerGrpcAddress == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - var err error - - // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.shardId, s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if 
!bootstrap { - client, err := NewGRPCClient(s.peerGrpcAddress) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &index.ClusterJoinRequest{ - Node: s.node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/indexer/server_test.go b/indexer/server_test.go deleted file mode 100644 index 7563ed3..0000000 --- a/indexer/server_test.go +++ /dev/null @@ -1,2177 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/strutils" - "github.com/mosuka/blast/testutils" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - 
server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // healthiness - reqHealthiness := 
&index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - resHealthiness, err := client.NodeHealthCheck(reqHealthiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthinessState := index.NodeHealthCheckResponse_HEALTHY - actHealthinessState := resHealthiness.State - if expHealthinessState != actHealthinessState { - t.Fatalf("expected content to see %v, saw %v", expHealthinessState, actHealthinessState) - } - - // liveness - reqLiveness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - resLiveness, err := client.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLivenessState := index.NodeHealthCheckResponse_ALIVE - actLivenessState := resLiveness.State - if expLivenessState != actLivenessState { - t.Fatalf("expected content to see %v, saw %v", expLivenessState, actLivenessState) - } - - // readiness - reqReadiness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - resReadiness, err := client.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadinessState := index.NodeHealthCheckResponse_READY - actReadinessState := resReadiness.State - if expReadinessState != actReadinessState { - t.Fatalf("expected content to see %v, saw %v", expReadinessState, actReadinessState) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := 
testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - req := &empty.Empty{} - res, err := client.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNodeInfo := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - actNodeInfo := res.Node - if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - 
peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId: { - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - }, - }, - } - actCluster := res.Cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Fatalf("expected 
content to see %v, saw %v", expCluster, actCluster) - } -} - -func TestServer_GetIndexMapping(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexMapping := indexMapping - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - 
t.Fatalf("%v", err) - } - - im, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - actIndexMapping := im.(*mapping.IndexMappingImpl) - - exp, err := json.Marshal(expIndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - act, err := json.Marshal(actIndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(exp, act) { - t.Fatalf("expected content to see %v, saw %v", exp, act) - } -} - -func TestServer_GetIndexType(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep 
- time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexType := indexType - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - t.Fatalf("%v", err) - } - - actIndexType := res.IndexConfig.IndexType - - if !reflect.DeepEqual(expIndexType, actIndexType) { - t.Fatalf("expected content to see %v, saw %v", expIndexType, actIndexType) - } -} - -func TestServer_GetIndexStorageType(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, 
httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStorageType := indexStorageType - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - t.Fatalf("%v", err) - } - - actIndexStorageType := res.IndexConfig.IndexStorageType - - if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { - t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) - } -} - -func TestServer_GetIndexStats(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - 
indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStats := map[string]interface{}{ - "index": map[string]interface{}{ - "analysis_time": float64(0), - "batches": float64(0), - "deletes": float64(0), - "errors": float64(0), - "index_time": float64(0), - "num_plain_text_bytes_indexed": float64(0), - "term_searchers_finished": float64(0), - "term_searchers_started": float64(0), - "updates": float64(0), - }, - "search_time": float64(0), - "searches": float64(0), - } - - req := &empty.Empty{} - res, err := client.GetIndexStats(req) - if err != nil { - t.Fatalf("%v", err) - } - - is, err := protobuf.MarshalAny(res.IndexStats) - if err != nil { - t.Fatalf("%v", err) - } - actIndexStats := *is.(*map[string]interface{}) - - if !reflect.DeepEqual(expIndexStats, actIndexStats) { - t.Fatalf("expected content to see %v, saw %v", expIndexStats, actIndexStats) - } -} - -func TestServer_Index(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := 
fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - req := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(req) - if err != nil { - t.Fatalf("%v", err) - } -} - -func TestServer_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := 
logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = 
index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // get non-existing document - getReq2 := &index.GetRequest{Id: "non-existing"} - getRes2, err := client.Get(getReq2) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } - if getRes2 != nil { - t.Fatalf("expected content to see nil, saw %v", getRes2) - } -} - -func TestServer_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - 
HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // delete document - deleteReq := &index.DeleteRequest{Id: "enwiki_1"} - _, err = 
client.Delete(deleteReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document again - getRes, err = client.Get(getReq) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } - if getRes != nil { - t.Fatalf("expected content to see nil, saw %v", getRes) - } - - // delete non-existing document - deleteReq2 := &index.DeleteRequest{Id: "non-existing"} - _, err = client.Delete(deleteReq2) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } -} - -func TestServer_Search(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, 
logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // search - searchRequestPath := filepath.Join(curDir, "../example/wiki_search_request.json") - searchRequestFile, err := os.Open(searchRequestPath) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = searchRequestFile.Close() - }() - searchRequestByte, err := ioutil.ReadAll(searchRequestFile) - if err != nil { - t.Fatalf("%v", err) - } - - searchReq := &index.SearchRequest{} - marshaler := JsonMarshaler{} - err = marshaler.Unmarshal(searchRequestByte, searchReq) - if err != nil { - 
t.Fatalf("%v", err) - } - searchRes, err := client.Search(searchReq) - if err != nil { - t.Fatalf("%v", err) - } - searchResult, err := protobuf.MarshalAny(searchRes.SearchResult) - if err != nil { - t.Fatalf("%v", err) - } - expTotal := uint64(1) - actTotal := searchResult.(*bleve.SearchResult).Total - if expTotal != actTotal { - t.Fatalf("expected content to see %v, saw %v", expTotal, actTotal) - } -} - -func TestCluster_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" 
- peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := 
indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - 
defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - 
Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - healthinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - livenessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - readinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - - // healthiness - healthinessRes1, err := client1.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness1 := healthinessRes1.State - if expHealthiness1 != actHealthiness1 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) - } - - // liveness - livenessRes1, err := client1.NodeHealthCheck(livenessReq) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness1 := 
index.NodeHealthCheckResponse_ALIVE - actLiveness1 := livenessRes1.State - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // readiness - readinessRes1, err := client1.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness1 := index.NodeHealthCheckResponse_READY - actReadiness1 := readinessRes1.State - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) - } - - // healthiness - healthinessRes2, err := client2.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness2 := healthinessRes2.State - if expHealthiness2 != actHealthiness2 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) - } - - // liveness - livenessRes2, err := client2.NodeHealthCheck(livenessReq) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness2 := index.NodeHealthCheckResponse_ALIVE - actLiveness2 := livenessRes2.State - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // readiness - readinessRes2, err := client2.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness2 := index.NodeHealthCheckResponse_READY - actReadiness2 := readinessRes2.State - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // healthiness - healthinessRes3, err := client3.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness3 := healthinessRes3.State - if expHealthiness3 != actHealthiness3 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) - } - - // liveness - livenessRes3, err := client3.NodeHealthCheck(livenessReq) - if err 
!= nil { - t.Fatalf("%v", err) - } - expLiveness3 := index.NodeHealthCheckResponse_ALIVE - actLiveness3 := livenessRes3.State - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } - - // readiness - readinessRes3, err := client3.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness3 := index.NodeHealthCheckResponse_READY - actReadiness3 := readinessRes3.State - if expReadiness3 != actReadiness3 { - t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, 
httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: 
index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get all node info from all nodes - node11, err := client1.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode11 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - actNode11 := node11.Node - if !reflect.DeepEqual(expNode11, actNode11) { - t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) - } - - node21, err := client2.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode21 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: 
grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - actNode21 := node21.Node - if !reflect.DeepEqual(expNode21, actNode21) { - t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) - } - - node31, err := client3.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode31 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - actNode31 := node31.Node - if !reflect.DeepEqual(expNode31, actNode31) { - t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := 
NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer 
func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster info from manager1 - cluster1, err := client1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: 
grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster1 := cluster1.Cluster - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) - } - - cluster2, err := client2.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster2 := cluster2.Cluster - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - cluster3, err := client3.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - 
GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster3 := cluster3.Cluster - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} diff --git a/logutils/logger.go b/log/log.go similarity index 54% rename from logutils/logger.go rename to log/log.go index 28611dd..5470fdf 100644 --- a/logutils/logger.go +++ b/log/log.go @@ -1,22 +1,10 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logutils +package log import ( "os" + "strconv" + accesslog "github.com/mash/go-accesslog" "github.com/natefinch/lumberjack" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -42,9 +30,12 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup } var ws zapcore.WriteSyncer - if logFilename == "" { + switch logFilename { + case "", os.Stderr.Name(): ws = zapcore.AddSync(os.Stderr) - } else { + case os.Stdout.Name(): + ws = zapcore.AddSync(os.Stdout) + default: ws = zapcore.AddSync( &lumberjack.Logger{ Filename: logFilename, @@ -74,7 +65,43 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup ), zap.AddCaller(), //zap.AddStacktrace(ll), - ) + ).Named("blast") return logger } + +type HTTPLogger struct { + Logger *zap.Logger +} + +func (l HTTPLogger) Log(record accesslog.LogRecord) { + // Output log that formatted Apache combined. + size := "-" + if record.Size > 0 { + size = strconv.FormatInt(record.Size, 10) + } + + referer := "-" + if record.RequestHeader.Get("Referer") != "" { + referer = record.RequestHeader.Get("Referer") + } + + userAgent := "-" + if record.RequestHeader.Get("User-Agent") != "" { + userAgent = record.RequestHeader.Get("User-Agent") + } + + l.Logger.Info( + "", + zap.String("ip", record.Ip), + zap.String("username", record.Username), + zap.String("time", record.Time.Format("02/Jan/2006 03:04:05 +0000")), + zap.String("method", record.Method), + zap.String("uri", record.Uri), + zap.String("protocol", record.Protocol), + zap.Int("status", record.Status), + zap.String("size", size), + zap.String("referer", referer), + zap.String("user_agent", userAgent), + ) +} diff --git a/logutils/grpc_logger.go b/logutils/grpc_logger.go deleted file mode 100644 index 85d6fa9..0000000 --- a/logutils/grpc_logger.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "os" - - "github.com/natefinch/lumberjack" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewGRPCLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackups int, logMaxAge int, logCompress bool) *zap.Logger { - var ll zapcore.Level - switch logLevel { - case "DEBUG": - ll = zap.DebugLevel - case "INFO": - ll = zap.InfoLevel - case "WARN", "WARNING": - ll = zap.WarnLevel - case "ERR", "ERROR": - ll = zap.WarnLevel - case "DPANIC": - ll = zap.DPanicLevel - case "PANIC": - ll = zap.PanicLevel - case "FATAL": - ll = zap.FatalLevel - } - - var ws zapcore.WriteSyncer - if logFilename == "" { - ws = zapcore.AddSync(os.Stderr) - } else { - ws = zapcore.AddSync( - &lumberjack.Logger{ - Filename: logFilename, - MaxSize: logMaxSize, // megabytes - MaxBackups: logMaxBackups, - MaxAge: logMaxAge, // days - Compress: logCompress, - }, - ) - } - - ec := zap.NewProductionEncoderConfig() - ec.TimeKey = "_timestamp_" - ec.LevelKey = "_level_" - ec.NameKey = "_name_" - ec.CallerKey = "_caller_" - ec.MessageKey = "_message_" - ec.StacktraceKey = "_stacktrace_" - ec.EncodeTime = zapcore.ISO8601TimeEncoder - ec.EncodeCaller = zapcore.ShortCallerEncoder - - logger := zap.New( - zapcore.NewCore( - zapcore.NewJSONEncoder(ec), - ws, - ll, - ), - //zap.AddCaller(), - //zap.AddStacktrace(ll), - ) - - return logger -} diff --git a/logutils/http_logger.go b/logutils/http_logger.go deleted file mode 100644 index bb4371f..0000000 --- a/logutils/http_logger.go +++ /dev/null @@ -1,90 +0,0 @@ 
-// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "io" - "log" - "os" - "strconv" - - accesslog "github.com/mash/go-accesslog" - "github.com/natefinch/lumberjack" -) - -func NewFileWriter(filename string, maxSize int, maxBackups int, maxAge int, compress bool) io.Writer { - var writer io.Writer - - switch filename { - case "", os.Stderr.Name(): - writer = os.Stderr - case os.Stdout.Name(): - writer = os.Stdout - default: - writer = &lumberjack.Logger{ - Filename: filename, - MaxSize: maxSize, // megabytes - MaxBackups: maxBackups, - MaxAge: maxAge, // days - Compress: compress, // disabled by default - } - } - - return writer -} - -type ApacheCombinedLogger struct { - logger *log.Logger -} - -func NewApacheCombinedLogger(filename string, maxSize int, maxBackups int, maxAge int, compress bool) *ApacheCombinedLogger { - writer := NewFileWriter(filename, maxSize, maxBackups, maxAge, compress) - return &ApacheCombinedLogger{ - logger: log.New(writer, "", 0), - } -} - -func (l ApacheCombinedLogger) Log(record accesslog.LogRecord) { - // Output log that formatted Apache combined. 
- size := "-" - if record.Size > 0 { - size = strconv.FormatInt(record.Size, 10) - } - - referer := "-" - if record.RequestHeader.Get("Referer") != "" { - referer = record.RequestHeader.Get("Referer") - } - - userAgent := "-" - if record.RequestHeader.Get("User-Agent") != "" { - userAgent = record.RequestHeader.Get("User-Agent") - } - - l.logger.Printf( - "%s - %s [%s] \"%s %s %s\" %d %s \"%s\" \"%s\" %.4f", - record.Ip, - record.Username, - record.Time.Format("02/Jan/2006 03:04:05 +0000"), - record.Method, - record.Uri, - record.Protocol, - record.Status, - size, - referer, - userAgent, - record.ElapsedTime.Seconds(), - ) -} diff --git a/main.go b/main.go new file mode 100644 index 0000000..3ad98ef --- /dev/null +++ b/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "os" + + "github.com/mosuka/blast/cmd" +) + +func main() { + if err := cmd.Execute(); err != nil { + os.Exit(1) + } + + os.Exit(0) +} diff --git a/manager/grpc_client.go b/manager/grpc_client.go deleted file mode 100644 index 4d732a4..0000000 --- a/manager/grpc_client.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/management" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client management.ManagementClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) 
- if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: management.NewManagementClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *management.NodeHealthCheckRequest, opts ...grpc.CallOption) (*management.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) -} - -func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.NodeInfoResponse, error) { - return c.client.NodeInfo(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterJoin(req *management.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterJoin(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterLeave(req *management.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterLeave(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.ClusterInfoResponse, error) { - return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { - return c.client.ClusterWatch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Get(req *management.GetRequest, opts ...grpc.CallOption) (*management.GetResponse, error) { - res, err := c.client.Get(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - return &management.GetResponse{}, nil - default: - return nil, err - } - } - return res, nil -} - -func (c *GRPCClient) Set(req *management.SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Set(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *management.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - res, err := c.client.Delete(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - return &empty.Empty{}, nil - default: - return nil, err - } - } - return res, nil -} - -func (c *GRPCClient) Watch(req *management.WatchRequest, opts ...grpc.CallOption) (management.Management_WatchClient, error) { - return c.client.Watch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Snapshot(c.ctx, &empty.Empty{}) -} diff --git a/manager/grpc_gateway.go b/manager/grpc_gateway.go deleted file mode 100644 index 3f505d4..0000000 --- a/manager/grpc_gateway.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *management.GetResponse: - value, err := protobuf.MarshalAny(v.(*management.GetResponse).Value) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "value": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *management.SetRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - value, ok := tmpValue["value"] - if !ok { - return errors.New("value does not exist") - } - v.(*management.SetRequest).Value = &any.Any{} - return protobuf.UnmarshalAny(value, v.(*management.SetRequest).Value) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := management.RegisterManagementHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/grpc_server.go b/manager/grpc_server.go deleted file mode 100644 index 8d17486..0000000 --- a/manager/grpc_server.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "fmt" - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service management.ManagementServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service management.ManagementServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor( - grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - ), - ), - grpc.UnaryInterceptor( - grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - 
//grpc_recovery.UnaryServerInterceptor(), - ), - ), - ) - - management.RegisterManagementServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} - -func (s *GRPCServer) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/grpc_service.go b/manager/grpc_service.go deleted file mode 100644 index c79f7ad..0000000 --- a/manager/grpc_service.go +++ /dev/null @@ -1,714 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "encoding/json" - "errors" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers *management.Cluster - peerClients map[string]*GRPCClient - cluster *management.Cluster - clusterChans map[chan management.ClusterWatchResponse]struct{} - clusterMutex sync.RWMutex - - stateChans map[chan management.WatchResponse]struct{} - stateMutex sync.RWMutex -} - -func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - raftServer: raftServer, - logger: logger, - - peers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - peerClients: make(map[string]*GRPCClient, 0), - cluster: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - clusterChans: make(map[chan management.ClusterWatchResponse]struct{}), - - stateChans: make(map[chan management.WatchResponse]struct{}), - }, nil -} - -func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - s.stopUpdateCluster() - - return nil -} - -func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - //leaderId, err := s.raftServer.LeaderID(10 * time.Second) - //if err != nil { - // return nil, err - //} - //client, exist := s.peerClients[string(leaderId)] - //if !exist { - // err := errors.New("there is no client for leader") - // 
s.logger.Error(err.Error()) - // return nil, err - //} - //return client, nil - - for id, node := range s.cluster.Nodes { - switch node.State { - case management.Node_LEADER: - } - if client, exist := s.peerClients[id]; exist { - return client, nil - } - } - - err := errors.New("there is no client for leader") - s.logger.Error(err.Error()) - return nil, err -} - -func (s *GRPCService) cloneCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - savedCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - for { - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - s.cluster, err = s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - snapshotCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - for id, node := range snapshotCluster.Nodes { - if id != s.NodeID() { - s.peers.Nodes[id] = node - } - } - - // open clients for peer nodes - for id, node := range s.peers.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - client, exist := s.peerClients[id] - if exist { - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Info("recreate gRPC client", zap.String("id", id), 
zap.String("grpc_addr", node.Metadata.GrpcAddress)) - delete(s.peerClients, id) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id)) - } - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } else { - s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } - - // close clients for non-existent peer nodes - for id, client := range s.peerClients { - if _, exist := s.peers.Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.peerClients, id) - } - } - - // check joined and updated nodes - for id, node := range snapshotCluster.Nodes { - nodeSnapshot, exist := savedCluster.Nodes[id] - if exist { - // node exists in the cluster - n1, err := json.Marshal(node) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) - continue - } - n2, err := json.Marshal(nodeSnapshot) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) - continue - } - if !cmp.Equal(n1, n2) { - // node updated - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_UPDATE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } else { - // node joined - // notify the cluster 
changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_JOIN, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - // check left nodes - for id, node := range savedCluster.Nodes { - if _, exist := snapshotCluster.Nodes[id]; !exist { - // node left - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_LEAVE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - savedCluster = snapshotCluster - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *management.NodeHealthCheckRequest) (*management.NodeHealthCheckResponse, error) { - resp := &management.NodeHealthCheckResponse{} - - switch req.Probe { - case management.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case management.NodeHealthCheckRequest_HEALTHINESS: - resp.State = management.NodeHealthCheckResponse_HEALTHY - case management.NodeHealthCheckRequest_LIVENESS: - resp.State = management.NodeHealthCheckResponse_ALIVE - case management.NodeHealthCheckRequest_READINESS: - resp.State = management.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() *management.Node { - node := s.raftServer.node - - switch s.raftServer.State() { - case raft.Follower: - node.State = management.Node_FOLLOWER - case raft.Candidate: - node.State = management.Node_CANDIDATE - case raft.Leader: - node.State = management.Node_LEADER - case raft.Shutdown: - node.State = management.Node_SHUTDOWN - default: - node.State = management.Node_UNKNOWN - } - - return node -} - -func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { - if _, exist := s.peerClients[id]; !exist { - err := errors.New("node does not exist in peers") - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - - req := &empty.Empty{} - resp, err := s.peerClients[id].NodeInfo(req) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return &management.Node{ - BindAddress: "", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "", - HttpAddress: "", - }, - }, nil - } - - return resp.Node, nil -} - -func (s *GRPCService) getNode(id string) (*management.Node, error) { - if id == "" || id == s.NodeID() { - return s.getSelfNode(), nil - } else { - return s.getPeerNode(id) - } -} - -func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*management.NodeInfoResponse, error) { - resp := &management.NodeInfoResponse{} - - node, err := s.getNode(s.NodeID()) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return &management.NodeInfoResponse{ - Node: node, - }, nil -} - -func (s *GRPCService) setNode(node *management.Node) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(node) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := 
s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &management.ClusterJoinRequest{ - Node: node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.setNode(req.Node) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &management.ClusterLeaveRequest{ - Id: id, - } - - _, err = client.ClusterLeave(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterLeave(ctx context.Context, req *management.ClusterLeaveRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (*management.Cluster, error) { - cluster, err := s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update latest node state - for id := range cluster.Nodes { - node, err := s.getNode(id) - if err != nil { - s.logger.Debug(err.Error()) - continue - } - cluster.Nodes[id] = node - } - - return cluster, nil -} - -func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*management.ClusterInfoResponse, error) { - resp := &management.ClusterInfoResponse{} - - cluster, err := s.getCluster() - if err 
!= nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = cluster - - return resp, nil -} - -func (s *GRPCService) ClusterWatch(req *empty.Empty, server management.Management_ClusterWatchServer) error { - chans := make(chan management.ClusterWatchResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Get(ctx context.Context, req *management.GetRequest) (*management.GetResponse, error) { - s.stateMutex.RLock() - defer func() { - s.stateMutex.RUnlock() - }() - - resp := &management.GetResponse{} - - value, err := s.raftServer.GetValue(req.Key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(value, valueAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Value = valueAny - - return resp, nil -} - -func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - if s.raftServer.IsLeader() { - value, err := protobuf.MarshalAny(req.Value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - err = s.raftServer.SetValue(req.Key, value) - if err != nil { - 
s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Set(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - // notify - for c := range s.stateChans { - c <- management.WatchResponse{ - Command: management.WatchResponse_SET, - Key: req.Key, - Value: req.Value, - } - } - - return resp, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteValue(req.Key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Delete(req) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - } - - // notify - for c := range s.stateChans { - c <- management.WatchResponse{ - Command: management.WatchResponse_DELETE, - Key: req.Key, - } - } - - 
return resp, nil -} - -func (s *GRPCService) Watch(req *management.WatchRequest, server management.Management_WatchServer) error { - chans := make(chan management.WatchResponse) - - s.stateMutex.Lock() - s.stateChans[chans] = struct{}{} - s.stateMutex.Unlock() - - defer func() { - s.stateMutex.Lock() - delete(s.stateChans, chans) - s.stateMutex.Unlock() - close(chans) - }() - - // normalize key - key := func(key string) string { - keys := make([]string, 0) - for _, k := range strings.Split(key, "/") { - if k != "" { - keys = append(keys, k) - } - } - return strings.Join(keys, "/") - }(req.Key) - - for resp := range chans { - if !strings.HasPrefix(resp.Key, key) { - continue - } - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/manager/http_handler.go b/manager/http_handler.go deleted file mode 100644 index 0ceb447..0000000 --- a/manager/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/manager/http_server.go b/manager/http_server.go deleted file mode 100644 index 33bd0fc..0000000 --- a/manager/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go deleted file mode 100644 index bfd859f..0000000 --- a/manager/raft_fsm.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - logger *zap.Logger - - cluster *management.Cluster - clusterMutex sync.RWMutex - - data maputils.Map - dataMutex sync.RWMutex -} - -func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - f.logger.Info("initialize cluster") - f.cluster = &management.Cluster{Nodes: make(map[string]*management.Node, 0)} - - f.logger.Info("initialize store data") - f.data = maputils.Map{} - - return nil -} - -func (f *RaftFSM) Stop() error { - return nil -} - -func (f *RaftFSM) GetNode(nodeId string) (*management.Node, error) { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - node, ok := f.cluster.Nodes[nodeId] - if !ok { - return nil, blasterrors.ErrNotFound - } - - return node, nil -} - -func (f *RaftFSM) SetNode(node *management.Node) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - f.cluster.Nodes[node.Id] = node - - return nil -} - -func (f *RaftFSM) DeleteNode(nodeId string) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - if _, ok := f.cluster.Nodes[nodeId]; !ok { - return blasterrors.ErrNotFound - } - - delete(f.cluster.Nodes, nodeId) - - return nil -} - -func (f *RaftFSM) GetValue(key string) (interface{}, error) { - // get raw data - value, err := f.data.Get(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the store data", zap.String("key", key)) - return nil, blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return nil, err - } 
- } - - return value, nil -} - -func (f *RaftFSM) SetValue(key string, value interface{}, merge bool) error { - if merge { - err := f.data.Merge(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } else { - err := f.data.Set(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } - - return nil -} - -func (f *RaftFSM) DeleteValue(key string) error { - err := f.data.Delete(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the store data", zap.String("key", key)) - return blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return err - } - } - - return nil -} - -type fsmResponse struct { - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - proposal := &management.Proposal{} - err := proto.Unmarshal(l.Data, proposal) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - switch proposal.Event { - case management.Proposal_SET_NODE: - err = f.SetNode(proposal.Node) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_DELETE_NODE: - err = f.DeleteNode(proposal.Node.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_SET_VALUE: - value, err := protobuf.MarshalAny(proposal.KeyValue.Value) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetValue(proposal.KeyValue.Key, value, false) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_DELETE_VALUE: - err = f.DeleteValue(proposal.KeyValue.Key) - if err != nil { - 
f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - default: - err = errors.New("unsupported command") - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - data: f.data, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - err = json.Unmarshal(data, &f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -type RaftFSMSnapshot struct { - data maputils.Map - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - buff, err := json.Marshal(f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - _, err = sink.Write(buff) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go deleted file mode 100644 index 86f70ba..0000000 --- a/manager/raft_fsm_test.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "io/ioutil" - "os" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf/management" -) - -func TestRaftFSM_GetNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: 
"5110", - HttpAddress: "8110", - }, - } - - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - -} - -func TestRaftFSM_SetNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - - val2, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := 
&management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestRaftFSM_DeleteNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = fsm.DeleteNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - - val2, err := fsm.GetNode("node2") - if err == nil { - t.Fatalf("expected error: %v", 
err) - } - - act1 = val2 - if reflect.DeepEqual(nil, act1) { - t.Fatalf("expected content to see nil, saw %v", act1) - } -} - -func TestRaftFSM_Get(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - expectedValue := 1 - actualValue := value - if !cmp.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestRaftFSM_Set(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - val1, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := map[string]interface{}{"a": 1} - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - // merge {"a": "A"} - _ = fsm.SetValue("/", map[string]interface{}{"a": "A"}, true) - val2, err := 
fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := map[string]interface{}{"a": "A"} - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - // set {"a": {"b": "AB"}} - err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"b": "AB"}}, false) - if err != nil { - t.Fatalf("%v", err) - } - - val3, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp3 := map[string]interface{}{"a": map[string]interface{}{"b": "AB"}} - act3 := val3 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } - - // merge {"a": {"c": "AC"}} - err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"c": "AC"}}, true) - if err != nil { - t.Fatalf("%v", err) - } - val4, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp4 := map[string]interface{}{"a": map[string]interface{}{"b": "AB", "c": "AC"}} - act4 := val4 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - val5, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp5 := map[string]interface{}{"a": 1} - act5 := val5 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - - // TODO: merge {"a": {"c": "AC"}} - //fsm.applySet("/", map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //}, true) - //val6, err := fsm.Get("/") - //if err != nil { - // t.Fatalf("%v", err) - //} - //exp6 := map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //} - //act6 := val6 - //if !reflect.DeepEqual(exp6, act6) { - // t.Fatalf("expected content to see %v, saw %v", exp6, act6) - //} -} - -func TestRaftFSM_Delete(t *testing.T) { - tmp, err 
:= ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - expectedValue := 1 - actualValue := value - if !cmp.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } - - err = fsm.DeleteValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - value, err = fsm.GetValue("/a") - if err == nil { - t.Fatalf("expected nil: %v", err) - } - - actualValue = value - if nil != actualValue { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} diff --git a/manager/raft_server.go b/manager/raft_server.go deleted file mode 100644 index 7918433..0000000 --- a/manager/raft_server.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/blevesearch/bleve/mapping" - "github.com/gogo/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - _ "github.com/mosuka/blast/builtins" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - node *management.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - bootstrap bool - logger *zap.Logger - - transport *raft.NetworkTransport - raft *raft.Raft - fsm *RaftFSM - mu sync.RWMutex -} - -func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.dataDir, "store") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("id", s.node.Id)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.node.Id) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - //if 
s.bootstrap { - // raftConfig.StartAsLeader = true - //} - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) - addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.dataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.raftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - //case "badger": - // logStorePath := filepath.Join(s.dataDir, "raft", "log") - // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = 
os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - // s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - default: - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - 
Address: s.transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own node config", zap.Any("node", s.node)) - err = s.setNode(s.node) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set index config - s.logger.Info("register index config") - b, err := json.Marshal(s.indexMapping) - if err != nil { - s.logger.Error(err.Error()) - return err - } - var indexMappingMap map[string]interface{} - err = json.Unmarshal(b, &indexMappingMap) - if err != nil { - s.logger.Error(err.Error()) - return err - } - indexConfig := map[string]interface{}{ - "index_mapping": indexMappingMap, - "index_type": s.indexType, - "index_storage_type": s.indexStorageType, - } - err = s.SetValue("index_config", indexConfig) - if err != nil { - s.logger.Error(err.Error(), zap.String("key", "index_config")) - return err - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout 
time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeAddress() string { - return string(s.transport.LocalAddr()) -} - -func (s *RaftServer) NodeID() string { - return s.node.Id -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { - nodeConfig, err := s.fsm.GetNode(nodeId) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", nodeId)) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNode(node *management.Node) error { - proposal := &management.Proposal{ - Event: management.Proposal_SET_NODE, - Node: node, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) deleteNode(nodeId string) error { - proposal := 
&management.Proposal{ - Event: management.Proposal_DELETE_NODE, - Node: &management.Node{ - Id: nodeId, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (*management.Node, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - var node *management.Node - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - node, err = s.getNode(id) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(node *management.Node) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(node.Id) { - s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) - return nil - } - } - - if node.BindAddress == "" { - err = errors.New("missing bind address") - s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) - return err - } - - // add node to Raft cluster - s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) - f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) - err = f.Error() - if err != nil { - 
s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) - return err - } - - // set node config - err = s.setNode(node) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) - f := s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", string(server.ID))) - return err - } - } - } - - // delete node config - err = s.deleteNode(nodeId) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (*management.Cluster, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := &management.Cluster{Nodes: make(map[string]*management.Node, 0)} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) - continue - } - - cluster.Nodes[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetValue(key string) (interface{}, error) { - value, err := s.fsm.GetValue(key) - if err 
!= nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", key)) - default: - s.logger.Error(err.Error(), zap.String("key", key)) - } - return nil, err - } - - return value, nil -} - -func (s *RaftServer) SetValue(key string, value interface{}) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - proposal := &management.Proposal{ - Event: management.Proposal_SET_VALUE, - KeyValue: &management.KeyValue{ - Key: key, - Value: valueAny, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) DeleteValue(key string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &management.Proposal{ - Event: management.Proposal_DELETE_VALUE, - KeyValue: &management.KeyValue{ - Key: key, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", key)) - default: - s.logger.Error(err.Error(), zap.String("key", key)) - } - return err - } - - return nil -} diff --git a/manager/server.go 
b/manager/server.go deleted file mode 100644 index 909b4fc..0000000 --- a/manager/server.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "github.com/blevesearch/bleve/mapping" - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" -) - -type Server struct { - peerGrpcAddr string - node *management.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(peerGrpcAddr string, node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - peerGrpcAddr: peerGrpcAddr, - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // bootstrap node? 
- bootstrap := s.peerGrpcAddr == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if !bootstrap { - client, err := 
NewGRPCClient(s.peerGrpcAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &management.ClusterJoinRequest{ - Node: s.node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} - -func (s *Server) BindAddress() string { - return s.raftServer.NodeAddress() -} - -func (s *Server) GrpcAddress() string { - address, err := s.grpcServer.GetAddress() - if err != nil { - return "" - } - - return address -} - -func (s *Server) HttpAddress() string { - address, err := s.grpcGateway.GetAddress() - if err != nil { - return "" - } - - return address -} diff --git a/manager/server_test.go b/manager/server_test.go deleted file mode 100644 index 0b863c3..0000000 --- a/manager/server_test.go +++ /dev/null @@ -1,2774 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/strutils" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } 
- indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC 
client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // healthiness - reqHealthiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - resHealthiness, err := client.NodeHealthCheck(reqHealthiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness := management.NodeHealthCheckResponse_HEALTHY - actHealthiness := resHealthiness.State - if expHealthiness != actHealthiness { - t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) - } - - // liveness - reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - resLiveness, err := client.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness := management.NodeHealthCheckResponse_ALIVE - actLiveness := resLiveness.State - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } - - // readiness - reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - resReadiness, err := client.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness := management.NodeHealthCheckResponse_READY - actReadiness := resReadiness.State - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewawyAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := 
fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewawyAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - res, err := client.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNodeInfo := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewawyAddress, - HttpAddress: httpAddress, - }, - } - actNodeInfo := res.Node - if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, 
false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - res, err := client.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId: { - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - 
GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - }, - }, - } - actCluster := res.Cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) - } -} - -func TestServer_Set(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != 
nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{ - Key: "test/key1", - } - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(getRes.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, 
indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{Key: "test/key1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(getRes.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: 
grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - if err != nil { - t.Fatalf("%v", err) - } - err = protobuf.UnmarshalAny("val1", valueAny) - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{ - Key: "test/key1", - } - res, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(res.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } - - // delete value - deleteReq := &management.DeleteRequest{ - Key: "test/key1", - } - _, err = client.Delete(deleteReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data - deleteNonExistingReq := &management.DeleteRequest{ - Key: "test/non-existing", - } - _, err = client.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } -} - -func TestCluster_Start(t *testing.T) { - 
curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: 
bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server 
- server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = 
os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer 
func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - reqHealtiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - - // healthiness - resHealthiness1, err := client1.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness1 := resHealthiness1.State - if expHealthiness1 != actHealthiness1 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) - } - - // liveness - resLiveness1, err := client1.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness1 := management.NodeHealthCheckResponse_ALIVE - actLiveness1 := resLiveness1.State - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // readiness - resReadiness1, err := client1.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness1 := management.NodeHealthCheckResponse_READY - actReadiness1 := resReadiness1.State - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", 
expReadiness1, actReadiness1) - } - - // healthiness - resHealthiness2, err := client2.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness2 := resHealthiness2.State - if expHealthiness2 != actHealthiness2 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) - } - - // liveness - resLiveness2, err := client2.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness2 := management.NodeHealthCheckResponse_ALIVE - actLiveness2 := resLiveness2.State - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // readiness - resReadiness2, err := client2.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness2 := management.NodeHealthCheckResponse_READY - actReadiness2 := resReadiness2.State - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // healthiness - resHealthiness3, err := client3.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness3 := resHealthiness3.State - if expHealthiness3 != actHealthiness3 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) - } - - // liveness - resLiveness3, err := client3.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness3 := management.NodeHealthCheckResponse_ALIVE - actLiveness3 := resLiveness3.State - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } - - // readiness - resReadiness3, err := client3.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness3 := management.NodeHealthCheckResponse_READY - actReadiness3 := resReadiness3.State - if 
expReadiness3 != actReadiness3 { - t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := 
testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, 
logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get all node info from all nodes - req := &empty.Empty{} - resNodeInfo11, err := client1.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode11 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - actNode11 := resNodeInfo11.Node - if !reflect.DeepEqual(expNode11, actNode11) { - t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) - } - - resNodeInfo21, err := client2.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode21 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - actNode21 := resNodeInfo21.Node - if !reflect.DeepEqual(expNode21, actNode21) { - t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) - } - - resNodeInfo31, err := client3.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode31 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: 
&management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - actNode31 := resNodeInfo31.Node - if !reflect.DeepEqual(expNode31, actNode31) { - t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - 
httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" 
- indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster info from manager1 - req := &empty.Empty{} - resClusterInfo1, err := client1.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster1 := resClusterInfo1.Cluster - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Fatalf("expected content to see %v, saw %v", expCluster1, 
actCluster1) - } - - resClusterInfo2, err := client2.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster2 := resClusterInfo2.Cluster - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - resClusterInfo3, err := client3.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster3 := 
resClusterInfo3.Cluster - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} - -func TestCluster_Set(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - 
dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, 
indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - 
t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if !cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from 
all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } -} - -func TestCluster_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - 
HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", 
testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for 
data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if !cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - 
val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - 
t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } -} - -func TestCluster_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - 
raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 
!= nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if 
!cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - 
getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } - - // delete - deleteReq1 := &management.DeleteRequest{ - Key: "test/key1", - } - _, err = client1.Delete(deleteReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getRes11, err = client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes11.Value != nil { - t.Fatalf("%v", err) - } - getRes21, err = client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes21.Value != nil { - t.Fatalf("%v", err) - } - getRes31, err = client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes31.Value != nil { - t.Fatalf("%v", err) - } - - deleteReq2 := &management.DeleteRequest{ - Key: "test/key2", - } - _, err = client2.Delete(deleteReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getRes12, err = client1.Get(getReq2) - if err != nil { - 
t.Fatalf("%v", err) - } - if getRes12.Value != nil { - t.Fatalf("%v", err) - } - getRes22, err = client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - if getRes22.Value != nil { - t.Fatalf("%v", err) - } - getRes32, err = client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - if getRes32.Value != nil { - t.Fatalf("%v", err) - } - - deleteReq3 := &management.DeleteRequest{ - Key: "test/key2", - } - _, err = client3.Delete(deleteReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // delete non-existing data from manager1 - deleteNonExistingReq := &management.DeleteRequest{ - Key: "test/non-existing", - } - _, err = client1.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data from manager2 - _, err = client2.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data from manager3 - _, err = client3.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } -} diff --git a/indexutils/indexutils.go b/mapping/mapping.go similarity index 63% rename from indexutils/indexutils.go rename to mapping/mapping.go index 5c2dcfa..7bf0d24 100644 --- a/indexutils/indexutils.go +++ b/mapping/mapping.go @@ -1,18 +1,4 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexutils +package mapping import ( "encoding/json" @@ -22,16 +8,18 @@ import ( "github.com/blevesearch/bleve/mapping" ) +func NewIndexMapping() *mapping.IndexMappingImpl { + return mapping.NewIndexMapping() +} + func NewIndexMappingFromBytes(indexMappingBytes []byte) (*mapping.IndexMappingImpl, error) { indexMapping := mapping.NewIndexMapping() - err := indexMapping.UnmarshalJSON(indexMappingBytes) - if err != nil { + if err := indexMapping.UnmarshalJSON(indexMappingBytes); err != nil { return nil, err } - err = indexMapping.Validate() - if err != nil { + if err := indexMapping.Validate(); err != nil { return nil, err } diff --git a/maputils/error.go b/maputils/error.go deleted file mode 100644 index 455c9fc..0000000 --- a/maputils/error.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import "errors" - -var ( - ErrNotFound = errors.New("not found") -) diff --git a/maputils/maputils.go b/maputils/maputils.go deleted file mode 100644 index a5922fd..0000000 --- a/maputils/maputils.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import ( - "encoding/json" - "errors" - "strings" - - "github.com/imdario/mergo" - "github.com/stretchr/objx" - yaml "gopkg.in/yaml.v2" -) - -func splitKey(path string) []string { - keys := make([]string, 0) - for _, k := range strings.Split(path, "/") { - if k != "" { - keys = append(keys, k) - } - } - - return keys -} - -func makeSelector(key string) string { - return strings.Join(splitKey(key), objx.PathSeparator) -} - -func normalize(value interface{}) interface{} { - switch value.(type) { - case map[string]interface{}: - ret := Map{} - for k, v := range value.(map[string]interface{}) { - ret[k] = normalize(v) - } - return ret - case map[interface{}]interface{}: // when unmarshaled by yaml - ret := Map{} - for k, v := range value.(map[interface{}]interface{}) { - ret[k.(string)] = normalize(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, normalize(v)) - } - return ret - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func makeMap(path string, value interface{}) interface{} { - var ret interface{} - - keys := splitKey(path) - - if len(keys) >= 1 { - ret = Map{keys[0]: makeMap(strings.Join(keys[1:], "/"), value)} - } else if len(keys) == 0 { - ret = normalize(value) - } - - return ret -} - -type Map map[string]interface{} - -func New() Map { - return Map{} -} - -func FromMap(src 
map[string]interface{}) Map { - return normalize(src).(Map) -} - -func FromJSON(src []byte) (Map, error) { - t := map[string]interface{}{} - err := json.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func FromYAML(src []byte) (Map, error) { - t := map[string]interface{}{} - err := yaml.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func (m Map) Has(key string) (bool, error) { - _, err := m.Get(key) - if err != nil { - return false, err - } - - return true, nil -} - -func (m Map) Set(key string, value interface{}) error { - _ = m.Delete(key) - - err := m.Merge(key, value) - if err != nil { - return err - } - - return nil -} - -func (m Map) Merge(key string, value interface{}) error { - mm := makeMap(key, value).(Map) - - err := mergo.Merge(&m, mm, mergo.WithOverride) - if err != nil { - return err - } - - return nil -} - -func (m Map) Get(key string) (interface{}, error) { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - return tmpMap.(Map).ToMap(), nil - } - - iter := newIterator(splitKey(key)) - var value interface{} - for { - k, err := iter.value() - if err != nil { - return nil, err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return nil, ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } else { - value = tmpMap.(Map)[k] - break - } - } - - switch value.(type) { - case Map: - return value.(Map).ToMap(), nil - default: - return value, nil - } -} - -func (m Map) Delete(key string) error { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - // clear map - err := m.Clear() - if err != nil { - return err - } - return nil - } - - iter := newIterator(splitKey(key)) - for { - k, err := iter.value() - if err != nil { - return err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } 
else { - delete(tmpMap.(Map), k) - break - } - } - - return nil -} - -func (m Map) Clear() error { - for k := range m { - delete(m, k) - } - - return nil -} - -func (m Map) toMap(value interface{}) interface{} { - switch value.(type) { - case Map: - ret := map[string]interface{}{} - for k, v := range value.(Map) { - ret[k] = m.toMap(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, m.toMap(v)) - } - return ret - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func (m Map) ToMap() map[string]interface{} { - return m.toMap(m).(map[string]interface{}) -} - -func (m Map) ToJSON() ([]byte, error) { - mm := m.ToMap() - b, err := json.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -func (m Map) ToYAML() ([]byte, error) { - mm := m.ToMap() - b, err := yaml.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -type iterator struct { - keys []string - pos int -} - -func newIterator(keys []string) *iterator { - return &iterator{ - keys: keys, - pos: 0, - } -} - -func (i *iterator) hasNext() bool { - return i.pos < len(i.keys)-1 -} - -func (i *iterator) next() bool { - i.pos++ - return i.pos < len(i.keys)-1 -} - -func (i *iterator) value() (string, error) { - if i.pos > len(i.keys)-1 { - return "", errors.New("value is not valid after iterator finished") - } - return i.keys[i.pos], nil -} diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go deleted file mode 100644 index d71e400..0000000 --- a/maputils/maputils_test.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import ( - "bytes" - "reflect" - "testing" -) - -func Test_splitKey(t *testing.T) { - key1 := "/a/b/c/d" - keys1 := splitKey(key1) - exp1 := []string{"a", "b", "c", "d"} - act1 := keys1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - keys2 := splitKey(key2) - exp2 := make([]string, 0) - act2 := keys2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - keys3 := splitKey(key3) - exp3 := make([]string, 0) - act3 := keys3 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_makeSelector(t *testing.T) { - key1 := "/a/b/c/d" - selector1 := makeSelector(key1) - exp1 := "a.b.c.d" - act1 := selector1 - if exp1 != act1 { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - selector2 := makeSelector(key2) - exp2 := "" - act2 := selector2 - if exp2 != act2 { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - selector3 := makeSelector(key3) - exp3 := "" - act3 := selector3 - if exp3 != act3 { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_normalize(t *testing.T) { - data1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := normalize(data1) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, 
- "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_makeMap(t *testing.T) { - val1 := makeMap("/a/b/c", "C").(Map) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2 := makeMap("a/b", map[string]interface{}{"c": "C"}).(Map) - exp2 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestMap_FromMap(t *testing.T) { - map1 := FromMap(map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - }) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func TestMap_ToMap(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := map1.ToMap() - exp1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromYAML(t *testing.T) { - map1, err := FromYAML([]byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`)) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) 
{ - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToYAML(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.ToYAML() - if err != nil { - t.Fatalf("%v", err) - } - exp1 := []byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`) - act1 := val1 - if !bytes.Equal(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromJSON(t *testing.T) { - map1, err := FromJSON([]byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`)) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToJSON(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1, err := map1.ToJSON() - if err != nil { - t.Fatalf("%v", err) - } - exp1 := []byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`) - act1 := val1 - if !bytes.Equal(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_Has(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Has("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := true - act1 := val1 - if exp1 != act1 { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a/b/f") - if err != ErrNotFound { - t.Fatalf("%v", err) - } - exp2 := false - act2 := val2 - if exp2 == act2 { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Set(t *testing.T) { - map1 := Map{} - - err := map1.Set("/", Map{"a": "A"}) - if err != nil { - 
t.Fatalf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Set("/", Map{"A": "a"}) - if err != nil { - t.Fatalf("%v", err) - } - exp2 := Map{ - "A": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Set("/", Map{"A": 1}) - if err != nil { - t.Fatalf("%v", err) - } - exp3 := Map{ - "A": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Set("/A", "AAA") - if err != nil { - t.Fatalf("%v", err) - } - exp4 := Map{ - "A": "AAA", - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - err = map1.Set("/B", "BBB") - if err != nil { - t.Fatalf("%v", err) - } - exp5 := Map{ - "A": "AAA", - "B": "BBB", - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - - err = map1.Set("/C", map[string]interface{}{"D": "CCC-DDD"}) - if err != nil { - t.Fatalf("%v", err) - } - exp6 := Map{ - "A": "AAA", - "B": "BBB", - "C": Map{ - "D": "CCC-DDD", - }, - } - act6 := map1 - if !reflect.DeepEqual(exp6, act6) { - t.Fatalf("expected content to see %v, saw %v", exp6, act6) - } -} - -func Test_Merge(t *testing.T) { - map1 := Map{} - - err := map1.Merge("/", Map{"a": "A"}) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Merge("/a", "a") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := Map{ - "a": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Merge("/", Map{"a": 1}) - if err != nil { - t.Fatalf("%v", 
err) - } - exp3 := Map{ - "a": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } - - err = map1.Merge("/", Map{"b": 2}) - if err != nil { - t.Fatalf("%v", err) - } - exp4 := Map{ - "a": 1, - "b": 2, - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - err = map1.Merge("/c", 3) - if err != nil { - t.Fatalf("%v", err) - } - exp5 := Map{ - "a": 1, - "b": 2, - "c": 3, - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - -} - -func Test_Get(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Get("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := "abc" - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Delete(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - err := map1.Delete("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - -} - -//func Test_Get(t *testing.T) { -// data1 := objx.Map{ -// "a": objx.Map{ -// "b": objx.Map{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", 
-// "ae2", -// }, -// }, -// } -// key1 := "/" -// val1, err := Get(data1, key1) -// if err != nil { -// t.Fatalf("%v", err) -// } -// exp1 := map[string]interface{}{ -// "a": map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// }, -// } -// act1 := val1 -// if !reflect.DeepEqual(exp1, act1) { -// t.Fatalf("expected content to see %v, saw %v", exp1, act1) -// } -// -// key2 := "/a" -// val2, err := Get(data1, key2) -// if err != nil { -// t.Fatalf("%v", err) -// } -// exp2 := map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// } -// act2 := val2 -// if !reflect.DeepEqual(exp2, act2) { -// t.Fatalf("expected content to see %v, saw %v", exp2, act2) -// } -//} - -//func Test_Set(t *testing.T) { -// data := map[string]interface{}{} -// -// data, err := Set(data, "/", map[string]interface{}{"a": 1}, true) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp1 := 1 -// act1 := val1 -// if exp1 != act1 { -// t.Fatalf("expected content to see %v, saw %v", exp1, act1) -// } -// -// fsm.applySet("/b/bb", map[string]interface{}{"b": 1}, false) -// -// val2, err := fsm.Get("/b") -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp2 := map[string]interface{}{"bb": map[string]interface{}{"b": 1}} -// act2 := val2.(map[string]interface{}) -// if !reflect.DeepEqual(exp2, act2) { -// t.Fatalf("expected content to see %v, saw %v", exp2, act2) -// } -// -// fsm.applySet("/", map[string]interface{}{"a": 1}, false) -// -// val3, err := fsm.Get("/") -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp3 := map[string]interface{}{"a": 1} -// act3 := val3 -// if !reflect.DeepEqual(exp3, act3) { -// t.Fatalf("expected content to see %v, saw %v", exp3, act3) -// } -// -// fsm.applySet("/", map[string]interface{}{"b": 2}, true) -// -// val4, err := fsm.Get("/") -// if 
err != nil { -// t.Fatalf("%v", err) -// } -// -// exp4 := map[string]interface{}{"a": 1, "b": 2} -// act4 := val4 -// if !reflect.DeepEqual(exp4, act4) { -// t.Fatalf("expected content to see %v, saw %v", exp4, act4) -// } -//} diff --git a/marshaler/marshaler.go b/marshaler/marshaler.go new file mode 100644 index 0000000..22c615c --- /dev/null +++ b/marshaler/marshaler.go @@ -0,0 +1,186 @@ +package marshaler + +import ( + "bufio" + "bytes" + "encoding/json" + "io" + "io/ioutil" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" +) + +var ( + DefaultContentType = "application/json" +) + +type BlastMarshaler struct{} + +func (*BlastMarshaler) ContentType() string { + return DefaultContentType +} + +func (m *BlastMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *protobuf.GetResponse: + var fields map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.GetResponse).Fields, &fields); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "fields": fields, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.SearchResponse: + var searchResult map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.SearchResponse).SearchResult, &searchResult); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "search_result": searchResult, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MappingResponse: + var m map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.MappingResponse).Mapping, &m); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "mapping": m, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MetricsResponse: + value := v.(*protobuf.MetricsResponse).Metrics 
+ return value, nil + default: + return json.Marshal(v) + } +} + +func (m *BlastMarshaler) Unmarshal(data []byte, v interface{}) error { + switch v.(type) { + case *protobuf.SetRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + if i, ok := m["id"].(string); ok { + v.(*protobuf.SetRequest).Id = i + } + + if f, ok := m["fields"].(map[string]interface{}); ok { + fieldsBytes, err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SetRequest).Fields = fieldsBytes + } + return nil + case *protobuf.BulkIndexRequest: + v.(*protobuf.BulkIndexRequest).Requests = make([]*protobuf.SetRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + } + return nil + case *protobuf.BulkDeleteRequest: + v.(*protobuf.BulkDeleteRequest).Requests = make([]*protobuf.DeleteRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = 
append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + } + return nil + case *protobuf.SearchRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + f, ok := m["search_request"] + if !ok { + return errors.ErrNil + } + searchRequestBytes, err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SearchRequest).SearchRequest = searchRequestBytes + return nil + default: + return json.Unmarshal(data, v) + } +} + +func (m *BlastMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + return m.Unmarshal(buffer, v) + }, + ) +} + +func (m *BlastMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +func (m *BlastMarshaler) Delimiter() []byte { + return []byte("\n") +} diff --git a/marshaler/util.go b/marshaler/util.go new file mode 100644 index 0000000..e935b8b --- /dev/null +++ b/marshaler/util.go @@ -0,0 +1,69 @@ +package marshaler + +import ( + "encoding/json" + "reflect" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/registry" +) + +func init() { + registry.RegisterType("protobuf.LivenessCheckResponse", reflect.TypeOf(protobuf.LivenessCheckResponse{})) + registry.RegisterType("protobuf.ReadinessCheckResponse", reflect.TypeOf(protobuf.ReadinessCheckResponse{})) + registry.RegisterType("protobuf.Metadata", reflect.TypeOf(protobuf.Metadata{})) + registry.RegisterType("protobuf.Node", reflect.TypeOf(protobuf.Node{})) + registry.RegisterType("protobuf.Cluster", reflect.TypeOf(protobuf.Cluster{})) + registry.RegisterType("protobuf.JoinRequest", reflect.TypeOf(protobuf.JoinRequest{})) + registry.RegisterType("protobuf.LeaveRequest", reflect.TypeOf(protobuf.LeaveRequest{})) + registry.RegisterType("protobuf.NodeResponse", reflect.TypeOf(protobuf.NodeResponse{})) + 
registry.RegisterType("protobuf.ClusterResponse", reflect.TypeOf(protobuf.ClusterResponse{})) + registry.RegisterType("protobuf.GetRequest", reflect.TypeOf(protobuf.GetRequest{})) + registry.RegisterType("protobuf.GetResponse", reflect.TypeOf(protobuf.GetResponse{})) + registry.RegisterType("protobuf.SetRequest", reflect.TypeOf(protobuf.SetRequest{})) + registry.RegisterType("protobuf.DeleteRequest", reflect.TypeOf(protobuf.DeleteRequest{})) + registry.RegisterType("protobuf.BulkIndexRequest", reflect.TypeOf(protobuf.BulkIndexRequest{})) + registry.RegisterType("protobuf.BulkDeleteRequest", reflect.TypeOf(protobuf.BulkDeleteRequest{})) + registry.RegisterType("protobuf.SetMetadataRequest", reflect.TypeOf(protobuf.SetMetadataRequest{})) + registry.RegisterType("protobuf.DeleteMetadataRequest", reflect.TypeOf(protobuf.DeleteMetadataRequest{})) + registry.RegisterType("protobuf.Event", reflect.TypeOf(protobuf.Event{})) + registry.RegisterType("protobuf.WatchResponse", reflect.TypeOf(protobuf.WatchResponse{})) + registry.RegisterType("protobuf.MetricsResponse", reflect.TypeOf(protobuf.MetricsResponse{})) + registry.RegisterType("protobuf.Document", reflect.TypeOf(protobuf.Document{})) + registry.RegisterType("map[string]interface {}", reflect.TypeOf((map[string]interface{})(nil))) +} + +func MarshalAny(message *any.Any) (interface{}, error) { + if message == nil { + return nil, nil + } + + typeUrl := message.TypeUrl + value := message.Value + + instance := registry.TypeInstanceByName(typeUrl) + + if err := json.Unmarshal(value, instance); err != nil { + return nil, err + } else { + return instance, nil + } + +} + +func UnmarshalAny(instance interface{}, message *any.Any) error { + if instance == nil { + return nil + } + + value, err := json.Marshal(instance) + if err != nil { + return err + } + + message.TypeUrl = registry.TypeNameByInstance(instance) + message.Value = value + + return nil +} diff --git a/marshaler/util_test.go b/marshaler/util_test.go new file mode 
100644 index 0000000..da72cd4 --- /dev/null +++ b/marshaler/util_test.go @@ -0,0 +1,109 @@ +package marshaler + +import ( + "bytes" + "testing" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" +) + +func TestMarshalAny(t *testing.T) { + // test map[string]interface{} + data := map[string]interface{}{"a": 1, "b": 2, "c": 3} + + mapAny := &any.Any{} + err := UnmarshalAny(data, mapAny) + if err != nil { + t.Errorf("%v", err) + } + + expectedType := "map[string]interface {}" + actualType := mapAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue := []byte(`{"a":1,"b":2,"c":3}`) + actualValue := mapAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } + + // test kvs.Node + node := &protobuf.Node{ + RaftAddress: ":7000", + State: "Leader", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + nodeAny := &any.Any{} + err = UnmarshalAny(node, nodeAny) + if err != nil { + t.Errorf("%v", err) + } + + expectedType = "protobuf.Node" + actualType = nodeAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue = []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`) + actualValue = nodeAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } +} + +func TestUnmarshalAny(t *testing.T) { + // test map[string]interface{} + dataAny := &any.Any{ + TypeUrl: "map[string]interface {}", + Value: []byte(`{"a":1,"b":2,"c":3}`), + } + + data, err := MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + dataMap := *data.(*map[string]interface{}) + + if dataMap["a"] != float64(1) { + t.Errorf("expected content to see %v, 
saw %v", 1, dataMap["a"]) + } + if dataMap["b"] != float64(2) { + t.Errorf("expected content to see %v, saw %v", 2, dataMap["b"]) + } + if dataMap["c"] != float64(3) { + t.Errorf("expected content to see %v, saw %v", 3, dataMap["c"]) + } + + // raft.Node + dataAny = &any.Any{ + TypeUrl: "protobuf.Node", + Value: []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`), + } + + data, err = MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + node := data.(*protobuf.Node) + + if node.RaftAddress != ":7000" { + t.Errorf("expected content to see %v, saw %v", ":7000", node.RaftAddress) + } + if node.Metadata.GrpcAddress != ":9000" { + t.Errorf("expected content to see %v, saw %v", ":9000", node.Metadata.GrpcAddress) + } + if node.Metadata.HttpAddress != ":8000" { + t.Errorf("expected content to see %v, saw %v", ":8000", node.Metadata.HttpAddress) + } + if node.State != "Leader" { + t.Errorf("expected content to see %v, saw %v", "Leader", node.State) + } +} diff --git a/metric/metric.go b/metric/metric.go new file mode 100644 index 0000000..9e6ba20 --- /dev/null +++ b/metric/metric.go @@ -0,0 +1,895 @@ +package metric + +import ( + grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // Create a metrics registry. + Registry = prometheus.NewRegistry() + + // Create some standard server metrics. + GrpcMetrics = grpcprometheus.NewServerMetrics( + func(o *prometheus.CounterOpts) { + o.Namespace = "blast" + }, + ) + + // Raft node state metric + RaftStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "state", + Help: "Node state. 
0:Follower, 1:Candidate, 2:Leader, 3:Shutdown", + }, []string{"id"}) + + RaftTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "term", + Help: "Term.", + }, []string{"id"}) + + RaftLastLogIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_index", + Help: "Last log index.", + }, []string{"id"}) + + RaftLastLogTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_term", + Help: "Last log term.", + }, []string{"id"}) + + RaftCommitIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "commit_index", + Help: "Commit index.", + }, []string{"id"}) + + RaftAppliedIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "applied_index", + Help: "Applied index.", + }, []string{"id"}) + + RaftFsmPendingMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "fsm_pending", + Help: "FSM pending.", + }, []string{"id"}) + + RaftLastSnapshotIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_index", + Help: "Last snapshot index.", + }, []string{"id"}) + + RaftLastSnapshotTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_term", + Help: "Last snapshot term.", + }, []string{"id"}) + + RaftLatestConfigurationIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "latest_configuration_index", + Help: "Latest configuration index.", + }, []string{"id"}) + + RaftNumPeersMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_peers", + Help: "Number of peers.", + }, []string{"id"}) + + 
RaftLastContactMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_copntact", + Help: "Last contact.", + }, []string{"id"}) + + RaftNumNodesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_nodes", + Help: "Number of nodes.", + }, []string{"id"}) + + IndexCurOnDiskBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_bytes", + Help: "cur_on_disk_bytes", + }, []string{"id"}) + + IndexCurOnDiskFilesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_files", + Help: "cur_on_disk_files", + }, []string{"id"}) + + IndexCurRootEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_root_epoch", + Help: "cur_root_epoch", + }, []string{"id"}) + + IndexLastMergedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_merged_epoch", + Help: "last_merged_epoch", + }, []string{"id"}) + + IndexLastPersistedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_persisted_epoch", + Help: "last_persisted_epoch", + }, []string{"id"}) + + IndexMaxBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_batch_intro_time", + Help: "max_batch_intro_time", + }, []string{"id"}) + + IndexMaxFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_file_merge_zap_time", + Help: "max_file_merge_zap_time", + }, []string{"id"}) + + IndexMaxMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_mem_merge_zap_time", + Help: "max_mem_merge_zap_time", + }, 
[]string{"id"}) + + IndexTotAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_analysis_time", + Help: "tot_analysis_time", + }, []string{"id"}) + + IndexTotBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batch_intro_time", + Help: "tot_batch_intro_time", + }, []string{"id"}) + + IndexTotBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches", + Help: "tot_batches", + }, []string{"id"}) + + IndexTotBatchesEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches_empty", + Help: "tot_batches_empty", + }, []string{"id"}) + + IndexTotDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_deletes", + Help: "tot_deletes", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions", + Help: "tot_file_merge_introductions", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_done", + Help: "tot_file_merge_introductions_done", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsSkippedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_skipped", + Help: "tot_file_merge_introductions_skipped", + }, []string{"id"}) + + IndexTotFileMergeLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_beg", + Help: "tot_file_merge_loop_beg", + }, []string{"id"}) + + IndexTotFileMergeLoopEndMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_end", + Help: "tot_file_merge_loop_end", + }, []string{"id"}) + + IndexTotFileMergeLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_err", + Help: "tot_file_merge_loop_err", + }, []string{"id"}) + + IndexTotFileMergePlanMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan", + Help: "tot_file_merge_plan", + }, []string{"id"}) + + IndexTotFileMergePlanErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_err", + Help: "tot_file_merge_plan_err", + }, []string{"id"}) + + IndexTotFileMergePlanNoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_none", + Help: "tot_file_merge_plan_none", + }, []string{"id"}) + + IndexTotFileMergePlanOkMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_ok", + Help: "tot_file_merge_plan_ok", + }, []string{"id"}) + + IndexTotFileMergePlanTasksMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks", + Help: "tot_file_merge_plan_tasks", + }, []string{"id"}) + + IndexTotFileMergePlanTasksDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_done", + Help: "tot_file_merge_plan_tasks_done", + }, []string{"id"}) + + IndexTotFileMergePlanTasksErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_err", + Help: "tot_file_merge_plan_tasks_err", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments", + Help: "tot_file_merge_plan_tasks_segments", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments_empty", + Help: "tot_file_merge_plan_tasks_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments", + Help: "tot_file_merge_segments", + }, []string{"id"}) + + IndexTotFileMergeSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments_empty", + Help: "tot_file_merge_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_written_bytes", + Help: "tot_file_merge_written_bytes", + }, []string{"id"}) + + IndexTotFileMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_beg", + Help: "tot_file_merge_zap_beg", + }, []string{"id"}) + + IndexTotFileMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_end", + Help: "tot_file_merge_zap_end", + }, []string{"id"}) + + IndexTotFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_time", + Help: "tot_file_merge_zap_time", + }, []string{"id"}) + + IndexTotFileSegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_segments_at_root", + Help: "tot_file_segments_at_root", 
+ }, []string{"id"}) + + IndexTotIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_index_time", + Help: "tot_index_time", + }, []string{"id"}) + + IndexTotIndexedPlainTextBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_indexed_plain_text_bytes", + Help: "tot_indexed_plain_text_bytes", + }, []string{"id"}) + + IndexTotIntroduceLoopMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_loop", + Help: "tot_introduce_loop", + }, []string{"id"}) + + IndexTotIntroduceMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_beg", + Help: "tot_introduce_merge_beg", + }, []string{"id"}) + + IndexTotIntroduceMergeEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_end", + Help: "tot_introduce_merge_end", + }, []string{"id"}) + + IndexTotIntroducePersistBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_beg", + Help: "tot_introduce_persist_beg", + }, []string{"id"}) + + IndexTotIntroducePersistEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_end", + Help: "tot_introduce_persist_end", + }, []string{"id"}) + + IndexTotIntroduceRevertBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_beg", + Help: "tot_introduce_revert_beg", + }, []string{"id"}) + + IndexTotIntroduceRevertEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_end", + Help: "tot_introduce_revert_end", + }, []string{"id"}) + + 
IndexTotIntroduceSegmentBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_beg", + Help: "tot_introduce_segment_beg", + }, []string{"id"}) + + IndexTotIntroduceSegmentEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_end", + Help: "tot_introduce_segment_end", + }, []string{"id"}) + + IndexTotIntroducedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_items", + Help: "tot_introduced_items", + }, []string{"id"}) + + IndexTotIntroducedSegmentsBatchMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_batch", + Help: "tot_introduced_segments_batch", + }, []string{"id"}) + + IndexTotIntroducedSegmentsMergeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_merge", + Help: "tot_introduced_segments_merge", + }, []string{"id"}) + + IndexTotItemsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_items_to_persist", + Help: "tot_items_to_persist", + }, []string{"id"}) + + IndexTotMemMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_beg", + Help: "tot_mem_merge_beg", + }, []string{"id"}) + + IndexTotMemMergeDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_done", + Help: "tot_mem_merge_done", + }, []string{"id"}) + + IndexTotMemMergeErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_err", + Help: "tot_mem_merge_err", + }, []string{"id"}) + + IndexTotMemMergeSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_segments", + Help: "tot_mem_merge_segments", + }, []string{"id"}) + + IndexTotMemMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_beg", + Help: "tot_mem_merge_zap_beg", + }, []string{"id"}) + + IndexTotMemMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_end", + Help: "tot_mem_merge_zap_end", + }, []string{"id"}) + + IndexTotMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_time", + Help: "tot_mem_merge_zap_time", + }, []string{"id"}) + + IndexTotMemorySegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_memory_segments_at_root", + Help: "tot_memory_segments_at_root", + }, []string{"id"}) + + IndexTotOnErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_on_errors", + Help: "tot_on_errors", + }, []string{"id"}) + + IndexTotPersistLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_beg", + Help: "tot_persist_loop_beg", + }, []string{"id"}) + + IndexTotPersistLoopEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_end", + Help: "tot_persist_loop_end", + }, []string{"id"}) + + IndexTotPersistLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_err", + Help: "tot_persist_loop_err", + }, []string{"id"}) + + IndexTotPersistLoopProgressMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: 
"tot_persist_loop_progress", + Help: "tot_persist_loop_progress", + }, []string{"id"}) + + IndexTotPersistLoopWaitMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait", + Help: "tot_persist_loop_wait", + }, []string{"id"}) + + IndexTotPersistLoopWaitNotifiedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait_notified", + Help: "tot_persist_loop_wait_notified", + }, []string{"id"}) + + IndexTotPersistedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_items", + Help: "tot_persisted_items", + }, []string{"id"}) + + IndexTotPersistedSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_segments", + Help: "tot_persisted_segments", + }, []string{"id"}) + + IndexTotPersisterMergerNapBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_merger_nap_break", + Help: "tot_persister_merger_nap_break", + }, []string{"id"}) + + IndexTotPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_nap_pause_completed", + Help: "tot_persister_nap_pause_completed", + }, []string{"id"}) + + IndexTotPersisterSlowMergerPauseMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_pause", + Help: "tot_persister_slow_merger_pause", + }, []string{"id"}) + + IndexTotPersisterSlowMergerResumeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_resume", + Help: "tot_persister_slow_merger_resume", + }, []string{"id"}) + + IndexTotTermSearchersFinishedMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_finished", + Help: "tot_term_searchers_finished", + }, []string{"id"}) + + IndexTotTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_started", + Help: "tot_term_searchers_started", + }, []string{"id"}) + + IndexTotUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_updates", + Help: "tot_updates", + }, []string{"id"}) + + IndexAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "analysis_time", + Help: "analysis_time", + }, []string{"id"}) + + IndexBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "batches", + Help: "batches", + }, []string{"id"}) + + IndexDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "deletes", + Help: "deletes", + }, []string{"id"}) + + IndexErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "errors", + Help: "errors", + }, []string{"id"}) + + IndexIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "index_time", + Help: "index_time", + }, []string{"id"}) + + IndexNumBytesUsedDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_bytes_used_disk", + Help: "num_bytes_used_disk", + }, []string{"id"}) + + IndexNumFilesOnDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_files_on_disk", + Help: "num_files_on_disk", + }, []string{"id"}) + + IndexNumItemsIntroducedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "num_items_introduced", + Help: "num_items_introduced", + }, []string{"id"}) + + IndexNumItemsPersistedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_items_persisted", + Help: "num_items_persisted", + }, []string{"id"}) + + IndexNumPersisterNapMergerBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_merger_break", + Help: "num_persister_nap_merger_break", + }, []string{"id"}) + + IndexNumPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_pause_completed", + Help: "num_persister_nap_pause_completed", + }, []string{"id"}) + + IndexNumPlainTextBytesIndexedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_plain_text_bytes_indexed", + Help: "num_plain_text_bytes_indexed", + }, []string{"id"}) + + IndexNumRecsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_recs_to_persist", + Help: "num_recs_to_persist", + }, []string{"id"}) + + IndexNumRootFilesegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_filesegments", + Help: "num_root_filesegments", + }, []string{"id"}) + + IndexNumRootMemorysegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_memorysegments", + Help: "num_root_memorysegments", + }, []string{"id"}) + + IndexTermSearchersFinishedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "term_searchers_finished", + Help: "term_searchers_finished", + }, []string{"id"}) + + IndexTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "term_searchers_started", + Help: "term_searchers_started", + }, []string{"id"}) + + IndexTotalCompactionWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "total_compaction_written_bytes", + Help: "total_compaction_written_bytes", + }, []string{"id"}) + + IndexUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "updates", + Help: "updates", + }, []string{"id"}) + + SearchTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "search_time", + Help: "search_time", + }, []string{"id"}) + + SearchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "searches", + Help: "searches", + }, []string{"id"}) +) + +func init() { + // Register standard server metrics and customized metrics to registry. + Registry.MustRegister( + GrpcMetrics, + RaftStateMetric, + RaftTermMetric, + RaftLastLogIndexMetric, + RaftLastLogTermMetric, + RaftCommitIndexMetric, + RaftAppliedIndexMetric, + RaftFsmPendingMetric, + RaftLastSnapshotIndexMetric, + RaftLastSnapshotTermMetric, + RaftLatestConfigurationIndexMetric, + RaftNumPeersMetric, + RaftLastContactMetric, + RaftNumNodesMetric, + IndexCurOnDiskBytesMetric, + IndexCurOnDiskFilesMetric, + IndexCurRootEpochMetric, + IndexLastMergedEpochMetric, + IndexLastPersistedEpochMetric, + IndexMaxBatchIntroTimeMetric, + IndexMaxFileMergeZapTimeMetric, + IndexMaxMemMergeZapTimeMetric, + IndexTotAnalysisTimeMetric, + IndexTotBatchIntroTimeMetric, + IndexTotBatchesMetric, + IndexTotBatchesEmptyMetric, + IndexTotDeletesMetric, + IndexTotFileMergeIntroductionsMetric, + IndexTotFileMergeIntroductionsDoneMetric, + IndexTotFileMergeIntroductionsSkippedMetric, + IndexTotFileMergeLoopBegMetric, + IndexTotFileMergeLoopEndMetric, + IndexTotFileMergeLoopErrMetric, + IndexTotFileMergePlanMetric, + 
IndexTotFileMergePlanErrMetric, + IndexTotFileMergePlanNoneMetric, + IndexTotFileMergePlanOkMetric, + IndexTotFileMergePlanTasksMetric, + IndexTotFileMergePlanTasksDoneMetric, + IndexTotFileMergePlanTasksErrMetric, + IndexTotFileMergePlanTasksSegmentsMetric, + IndexTotFileMergePlanTasksSegmentsEmptyMetric, + IndexTotFileMergeSegmentsMetric, + IndexTotFileMergeSegmentsEmptyMetric, + IndexTotFileMergeWrittenBytesMetric, + IndexTotFileMergeZapBegMetric, + IndexTotFileMergeZapEndMetric, + IndexTotFileMergeZapTimeMetric, + IndexTotFileSegmentsAtRootMetric, + IndexTotIndexTimeMetric, + IndexTotIndexedPlainTextBytesMetric, + IndexTotIntroduceLoopMetric, + IndexTotIntroduceMergeBegMetric, + IndexTotIntroduceMergeEndMetric, + IndexTotIntroducePersistBegMetric, + IndexTotIntroducePersistEndMetric, + IndexTotIntroduceRevertBegMetric, + IndexTotIntroduceRevertEndMetric, + IndexTotIntroduceSegmentBegMetric, + IndexTotIntroduceSegmentEndMetric, + IndexTotIntroducedItemsMetric, + IndexTotIntroducedSegmentsBatchMetric, + IndexTotIntroducedSegmentsMergeMetric, + IndexTotItemsToPersistMetric, + IndexTotMemMergeBegMetric, + IndexTotMemMergeDoneMetric, + IndexTotMemMergeErrMetric, + IndexTotMemMergeSegmentsMetric, + IndexTotMemMergeZapBegMetric, + IndexTotMemMergeZapEndMetric, + IndexTotMemMergeZapTimeMetric, + IndexTotMemorySegmentsAtRootMetric, + IndexTotOnErrorsMetric, + IndexTotPersistLoopBegMetric, + IndexTotPersistLoopEndMetric, + IndexTotPersistLoopErrMetric, + IndexTotPersistLoopProgressMetric, + IndexTotPersistLoopWaitMetric, + IndexTotPersistLoopWaitNotifiedMetric, + IndexTotPersistedItemsMetric, + IndexTotPersistedSegmentsMetric, + IndexTotPersisterMergerNapBreakMetric, + IndexTotPersisterNapPauseCompletedMetric, + IndexTotPersisterSlowMergerPauseMetric, + IndexTotPersisterSlowMergerResumeMetric, + IndexTotTermSearchersFinishedMetric, + IndexTotTermSearchersStartedMetric, + IndexTotUpdatesMetric, + IndexAnalysisTimeMetric, + IndexBatchesMetric, + IndexDeletesMetric, + 
IndexErrorsMetric, + IndexIndexTimeMetric, + IndexNumBytesUsedDiskMetric, + IndexNumFilesOnDiskMetric, + IndexNumItemsIntroducedMetric, + IndexNumItemsPersistedMetric, + IndexNumPersisterNapMergerBreakMetric, + IndexNumPersisterNapPauseCompletedMetric, + IndexNumPlainTextBytesIndexedMetric, + IndexNumRecsToPersistMetric, + IndexNumRootFilesegmentsMetric, + IndexNumRootMemorysegmentsMetric, + IndexTermSearchersFinishedMetric, + IndexTermSearchersStartedMetric, + IndexTotalCompactionWrittenBytesMetric, + IndexUpdatesMetric, + SearchTimeMetric, + SearchesMetric, + ) + GrpcMetrics.EnableHandlingTimeHistogram( + func(o *prometheus.HistogramOpts) { + o.Namespace = "blast" + }, + ) +} diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go deleted file mode 100644 index a942d09..0000000 --- a/protobuf/distribute/distribute.pb.go +++ /dev/null @@ -1,945 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/distribute/distribute.proto - -package distribute - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - index "github.com/mosuka/blast/protobuf/index" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) 
EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{1, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=distribute.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = 
NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type GetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{2} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type GetResponse struct { - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{3} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexRequest) Reset() { *m = IndexRequest{} } -func (m *IndexRequest) String() string 
{ return proto.CompactTextString(m) } -func (*IndexRequest) ProtoMessage() {} -func (*IndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{4} -} - -func (m *IndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexRequest.Unmarshal(m, b) -} -func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) -} -func (m *IndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexRequest.Merge(m, src) -} -func (m *IndexRequest) XXX_Size() int { - return xxx_messageInfo_IndexRequest.Size(m) -} -func (m *IndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexRequest proto.InternalMessageInfo - -func (m *IndexRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexRequest) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type DeleteRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{5} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m 
*DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type BulkIndexRequest struct { - Documents []*index.Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } -func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } -func (*BulkIndexRequest) ProtoMessage() {} -func (*BulkIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{6} -} - -func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) -} -func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) -} -func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexRequest.Merge(m, src) -} -func (m *BulkIndexRequest) XXX_Size() int { - return xxx_messageInfo_BulkIndexRequest.Size(m) -} -func (m *BulkIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo - -func (m *BulkIndexRequest) GetDocuments() []*index.Document { - if m != nil { - return m.Documents - } - return nil -} - -type BulkIndexResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } -func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } -func 
(*BulkIndexResponse) ProtoMessage() {} -func (*BulkIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{7} -} - -func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) -} -func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) -} -func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexResponse.Merge(m, src) -} -func (m *BulkIndexResponse) XXX_Size() int { - return xxx_messageInfo_BulkIndexResponse.Size(m) -} -func (m *BulkIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo - -func (m *BulkIndexResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type BulkDeleteRequest struct { - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } -func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteRequest) ProtoMessage() {} -func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{8} -} - -func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) -} -func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) -} -func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteRequest.Merge(m, src) -} -func (m *BulkDeleteRequest) XXX_Size() int { - return xxx_messageInfo_BulkDeleteRequest.Size(m) -} -func (m *BulkDeleteRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo - -func (m *BulkDeleteRequest) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -type BulkDeleteResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } -func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteResponse) ProtoMessage() {} -func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{9} -} - -func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) -} -func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) -} -func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteResponse.Merge(m, src) -} -func (m *BulkDeleteResponse) XXX_Size() int { - return xxx_messageInfo_BulkDeleteResponse.Size(m) -} -func (m *BulkDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo - -func (m *BulkDeleteResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type SearchRequest struct { - SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } 
-func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{10} -} - -func (m *SearchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) -} -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) -} -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo - -func (m *SearchRequest) GetSearchRequest() *any.Any { - if m != nil { - return m.SearchRequest - } - return nil -} - -type SearchResponse struct { - SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{11} -} - -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) -} -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) -} -func (m *SearchResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_SearchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo - -func (m *SearchResponse) GetSearchResult() *any.Any { - if m != nil { - return m.SearchResult - } - return nil -} - -func init() { - proto.RegisterEnum("distribute.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") - proto.RegisterType((*GetRequest)(nil), "distribute.GetRequest") - proto.RegisterType((*GetResponse)(nil), "distribute.GetResponse") - proto.RegisterType((*IndexRequest)(nil), "distribute.IndexRequest") - proto.RegisterType((*DeleteRequest)(nil), "distribute.DeleteRequest") - proto.RegisterType((*BulkIndexRequest)(nil), "distribute.BulkIndexRequest") - proto.RegisterType((*BulkIndexResponse)(nil), "distribute.BulkIndexResponse") - proto.RegisterType((*BulkDeleteRequest)(nil), "distribute.BulkDeleteRequest") - proto.RegisterType((*BulkDeleteResponse)(nil), "distribute.BulkDeleteResponse") - proto.RegisterType((*SearchRequest)(nil), "distribute.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "distribute.SearchResponse") -} - -func init() { - proto.RegisterFile("protobuf/distribute/distribute.proto", fileDescriptor_0b1b3e8a99d31c9c) -} - -var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 759 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x4e, 0xdb, 0x48, - 0x18, 0x5d, 0x27, 0x38, 0x90, 0x2f, 0x24, 0xf1, 0xce, 0x42, 0x00, 0x6f, 0xd8, 0x5d, 0x79, 0x77, - 0x25, 0xf0, 0x2e, 0xb6, 0x36, 0xdb, 0x9b, 0x82, 0xda, 0x2a, 0x34, 0x11, 0x20, 0xa2, 0x50, 0x39, - 0x40, 0x05, 0x52, 0x45, 0x9d, 0x78, 0x20, 
0x56, 0x1c, 0x3b, 0x8d, 0xc7, 0x15, 0xa8, 0xea, 0x4d, - 0x5f, 0xa1, 0xb7, 0x7d, 0x93, 0x5e, 0xf6, 0x11, 0xfa, 0x0a, 0x7d, 0x90, 0x6a, 0x66, 0xec, 0xc4, - 0x26, 0x3f, 0xe5, 0x06, 0xf9, 0xfb, 0x3b, 0xe7, 0xcc, 0x7c, 0x67, 0x08, 0xfc, 0x35, 0x18, 0x7a, - 0xc4, 0x6b, 0x07, 0xd7, 0xba, 0x65, 0xfb, 0x64, 0x68, 0xb7, 0x03, 0x82, 0x63, 0x9f, 0x1a, 0x2b, - 0x23, 0x18, 0x67, 0xe4, 0x8d, 0x1b, 0xcf, 0xbb, 0x71, 0xb0, 0x3e, 0x1a, 0x34, 0xdd, 0x3b, 0xde, - 0x26, 0xff, 0x7a, 0xbf, 0x84, 0xfb, 0x03, 0x12, 0x15, 0xe5, 0x51, 0xd6, 0x76, 0x2d, 0x7c, 0xcb, - 0xff, 0x86, 0xb5, 0x72, 0x38, 0x68, 0x0e, 0x6c, 0xdd, 0x74, 0x5d, 0x8f, 0x98, 0xc4, 0xf6, 0x5c, - 0x9f, 0x57, 0x95, 0x4f, 0x02, 0x94, 0x9a, 0x9e, 0x85, 0x0f, 0xb1, 0xe9, 0x90, 0xee, 0xf3, 0x2e, - 0xee, 0xf4, 0x0c, 0xfc, 0x26, 0xc0, 0x3e, 0x41, 0x4f, 0x41, 0x1c, 0x0c, 0xbd, 0x36, 0x5e, 0x17, - 0xfe, 0x10, 0xb6, 0x0a, 0x95, 0x2d, 0x2d, 0x26, 0x7d, 0xfa, 0x88, 0xf6, 0x82, 0xf6, 0x1b, 0x7c, - 0x4c, 0xd9, 0x07, 0x91, 0xc5, 0x28, 0x07, 0x8b, 0x67, 0xcd, 0xe3, 0xe6, 0xc9, 0xcb, 0xa6, 0xf4, - 0x13, 0x2a, 0x42, 0xee, 0xb0, 0x5e, 0x6d, 0x9c, 0x1e, 0x1e, 0x35, 0xeb, 0xad, 0x96, 0x24, 0xa0, - 0x65, 0x58, 0x6a, 0x1c, 0x9d, 0xd7, 0x59, 0x94, 0x42, 0x79, 0xc8, 0x1a, 0xf5, 0x6a, 0x8d, 0x17, - 0xd3, 0xca, 0x67, 0x01, 0xd6, 0x26, 0xb8, 0xfc, 0x81, 0xe7, 0xfa, 0x18, 0x3d, 0x03, 0xd1, 0x27, - 0x26, 0x89, 0xf4, 0x6d, 0xcf, 0xd5, 0xc7, 0x67, 0xb4, 0x16, 0x1d, 0x30, 0xf8, 0x9c, 0x72, 0x05, - 0x22, 0x8b, 0x93, 0x02, 0x73, 0xb0, 0xc8, 0x05, 0x5e, 0x48, 0x02, 0x95, 0x73, 0xd6, 0x8c, 0xc2, - 0x14, 0xca, 0x82, 0x58, 0xa5, 0x62, 0xa5, 0x34, 0x5a, 0x82, 0x85, 0x5a, 0xbd, 0x5a, 0x93, 0x16, - 0x68, 0x92, 0x4a, 0xbe, 0x90, 0x44, 0xda, 0xde, 0x3c, 0x39, 0xbd, 0xe2, 0x61, 0x46, 0x29, 0x03, - 0x1c, 0x60, 0x12, 0xdd, 0x67, 0x01, 0x52, 0xb6, 0xc5, 0xc4, 0x66, 0x8d, 0x94, 0x6d, 0x29, 0x7b, - 0x90, 0x63, 0xd5, 0xf0, 0x38, 0xff, 0x42, 0xe6, 0xda, 0xc6, 0x8e, 0xe5, 0xb3, 0x96, 0x5c, 0x65, - 0x45, 0xe3, 0x8b, 0xd3, 0xa2, 0xdd, 0x6a, 0x55, 0xf7, 0xce, 0x08, 0x7b, 0x94, 
0x06, 0x2c, 0x1f, - 0xd1, 0x25, 0xcf, 0x00, 0x8f, 0xa1, 0xa5, 0x1e, 0x80, 0xf6, 0x3b, 0xe4, 0x6b, 0xd8, 0xc1, 0x04, - 0xcf, 0xd2, 0x5a, 0x05, 0x69, 0x3f, 0x70, 0x7a, 0x09, 0xca, 0x1d, 0xc8, 0x5a, 0x5e, 0x27, 0xe8, - 0x63, 0x97, 0x50, 0xcd, 0xe9, 0xad, 0x5c, 0xa5, 0xa8, 0x71, 0xe7, 0xd5, 0xc2, 0xbc, 0x31, 0xee, - 0x50, 0xb6, 0xe1, 0xe7, 0x18, 0x44, 0x78, 0xe8, 0x15, 0x10, 0x3b, 0x5e, 0xe0, 0x12, 0x46, 0x25, - 0x1a, 0x3c, 0x50, 0xfe, 0xe6, 0xad, 0x49, 0x49, 0x12, 0xa4, 0x6d, 0x8b, 0x13, 0x65, 0x0d, 0xfa, - 0xa9, 0xa8, 0x80, 0xe2, 0x6d, 0x73, 0x21, 0x1b, 0x90, 0x6f, 0x61, 0x73, 0xd8, 0xe9, 0x46, 0x70, - 0x7b, 0x50, 0xf0, 0x59, 0xe2, 0x6a, 0xc8, 0x33, 0x73, 0xaf, 0x3d, 0xef, 0xc7, 0x87, 0x95, 0x63, - 0x28, 0x44, 0x68, 0x21, 0xeb, 0x63, 0xc8, 0x8f, 0xe0, 0xfc, 0xc0, 0x99, 0x8f, 0xb6, 0x1c, 0xa1, - 0xd1, 0xce, 0xca, 0x17, 0x11, 0xa0, 0x36, 0xb2, 0x2e, 0xba, 0x85, 0xe2, 0x3d, 0xf7, 0x22, 0xe5, - 0xc7, 0x4f, 0x4f, 0xfe, 0xf3, 0x01, 0xf6, 0x57, 0xca, 0x1f, 0xbe, 0x7e, 0xfb, 0x98, 0x2a, 0xa1, - 0x15, 0xfd, 0xed, 0x7f, 0xba, 0xeb, 0x59, 0x58, 0xef, 0xb2, 0xae, 0x0e, 0xa3, 0x39, 0x83, 0xf4, - 0x01, 0x26, 0xa8, 0x14, 0x47, 0x1a, 0xfb, 0x57, 0x5e, 0x9b, 0xc8, 0x87, 0xa8, 0x9b, 0x0c, 0x75, - 0x0d, 0xad, 0x52, 0xd4, 0xd1, 0xc2, 0xf5, 0x77, 0xb6, 0xf5, 0x44, 0x55, 0xdf, 0x23, 0x0f, 0x44, - 0xb6, 0x74, 0xb4, 0x1e, 0x07, 0x88, 0x5b, 0x49, 0x2e, 0x4d, 0x5c, 0x53, 0x9d, 0xfe, 0x77, 0x53, - 0x1e, 0x31, 0x64, 0x4d, 0xce, 0x27, 0x90, 0x77, 0x05, 0xf5, 0x52, 0x96, 0xa7, 0xb3, 0xed, 0x0a, - 0x2a, 0xba, 0x84, 0x0c, 0xf7, 0x04, 0xda, 0x88, 0x33, 0x26, 0xec, 0x34, 0x93, 0x32, 0x3c, 0x8c, - 0x3a, 0xe3, 0x30, 0xaf, 0x20, 0x3b, 0x72, 0x31, 0x2a, 0xc7, 0xe1, 0xef, 0xbf, 0x0f, 0x79, 0x73, - 0x46, 0x35, 0xbc, 0xb5, 0x5f, 0x18, 0x51, 0x5e, 0x5e, 0xa2, 0x44, 0xed, 0xc0, 0xe9, 0x51, 0xe9, - 0xaf, 0x01, 0xc6, 0x96, 0x46, 0x13, 0x08, 0xc9, 0x23, 0xfc, 0x36, 0xab, 0x9c, 0x64, 0x50, 0x13, - 0x0c, 0xe7, 0x90, 0xe1, 0xd6, 0x4d, 0x5e, 0x4e, 0xe2, 0x71, 0xc8, 0xf2, 0xb4, 0x52, 0x88, 0xba, - 0xca, 0x50, 0x8b, 
0x0a, 0x50, 0x54, 0x6e, 0xe4, 0x5d, 0x41, 0xdd, 0xdf, 0xb9, 0xfc, 0xe7, 0xc6, - 0x26, 0xdd, 0xa0, 0xad, 0x75, 0xbc, 0xbe, 0xde, 0xf7, 0xfc, 0xa0, 0x67, 0xea, 0x6d, 0xc7, 0xf4, - 0x89, 0x3e, 0xe5, 0x67, 0xb0, 0x9d, 0x61, 0xc9, 0xff, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0xf4, 0x78, 0x1a, 0x24, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DistributeClient is the client API for Distribute service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DistributeClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) - BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) -} - -type distributeClient struct { - cc *grpc.ClientConn -} - -func NewDistributeClient(cc *grpc.ClientConn) DistributeClient { - return &distributeClient{cc} -} - -func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, 
"/distribute.Distribute/NodeHealthCheck", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Index", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { - out := new(BulkIndexResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { - out := new(BulkDeleteResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkDelete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Search", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DistributeServer is the server API for Distribute service. 
-type DistributeServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - Get(context.Context, *GetRequest) (*GetResponse, error) - Index(context.Context, *IndexRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) - BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) - Search(context.Context, *SearchRequest) (*SearchResponse, error) -} - -// UnimplementedDistributeServer can be embedded to have forward compatible implementations. -type UnimplementedDistributeServer struct { -} - -func (*UnimplementedDistributeServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedDistributeServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedDistributeServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") -} -func (*UnimplementedDistributeServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedDistributeServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") -} -func (*UnimplementedDistributeServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") -} -func (*UnimplementedDistributeServer) Search(ctx context.Context, req 
*SearchRequest) (*SearchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") -} - -func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { - s.RegisterService(&_Distribute_serviceDesc, srv) -} - -func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Index(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Index", - } - handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { - return srv.(DistributeServer).Index(ctx, req.(*IndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).BulkIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/BulkIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).BulkIndex(ctx, req.(*BulkIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).BulkDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/BulkDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).BulkDelete(ctx, 
req.(*BulkDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Search(ctx, req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Distribute_serviceDesc = grpc.ServiceDesc{ - ServiceName: "distribute.Distribute", - HandlerType: (*DistributeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Distribute_NodeHealthCheck_Handler, - }, - { - MethodName: "Get", - Handler: _Distribute_Get_Handler, - }, - { - MethodName: "Index", - Handler: _Distribute_Index_Handler, - }, - { - MethodName: "Delete", - Handler: _Distribute_Delete_Handler, - }, - { - MethodName: "BulkIndex", - Handler: _Distribute_BulkIndex_Handler, - }, - { - MethodName: "BulkDelete", - Handler: _Distribute_BulkDelete_Handler, - }, - { - MethodName: "Search", - Handler: _Distribute_Search_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "protobuf/distribute/distribute.proto", -} diff --git a/protobuf/distribute/distribute.pb.gw.go b/protobuf/distribute/distribute.pb.gw.go deleted file mode 100644 index e540253..0000000 --- a/protobuf/distribute/distribute.pb.gw.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: protobuf/distribute/distribute.proto - -/* -Package distribute is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package distribute - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Distribute_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Distribute_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Distribute_NodeHealthCheck_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Get_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", 
err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Index_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Index_1(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return 
msg, metadata, err - -} - -func request_Distribute_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkIndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkDeleteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Search_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SearchRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterDistributeHandlerFromEndpoint is same as RegisterDistributeHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterDistributeHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterDistributeHandler(ctx, mux, conn) -} - -// RegisterDistributeHandler registers the http handlers for service Distribute to "mux". 
-// The handlers forward requests to the grpc endpoint over "conn". -func RegisterDistributeHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterDistributeHandlerClient(ctx, mux, NewDistributeClient(conn)) -} - -// RegisterDistributeHandlerClient registers the http handlers for service Distribute -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DistributeClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DistributeClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "DistributeClient" to call the correct interceptors. -func RegisterDistributeHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DistributeClient) error { - - mux.Handle("GET", pattern_Distribute_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Distribute_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Distribute_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Index_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Distribute_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Index_1(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Distribute_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Distribute_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Distribute_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Distribute_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Search_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Distribute_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", 
"bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Distribute_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Distribute_Get_0 = runtime.ForwardResponseMessage - - forward_Distribute_Index_0 = runtime.ForwardResponseMessage - - forward_Distribute_Index_1 = runtime.ForwardResponseMessage - - forward_Distribute_Delete_0 = runtime.ForwardResponseMessage - - forward_Distribute_BulkIndex_0 = runtime.ForwardResponseMessage - - forward_Distribute_BulkDelete_0 = runtime.ForwardResponseMessage - - forward_Distribute_Search_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto deleted file mode 100644 index beaf5a6..0000000 --- a/protobuf/distribute/distribute.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "protobuf/index/index.proto"; -import "google/api/annotations.proto"; - -package distribute; - -option go_package = "github.com/mosuka/blast/protobuf/distribute"; - -service Distribute { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/documents/{id=**}" - }; - } - rpc Index (IndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/documents" - body: "*" - additional_bindings { - put: "/v1/documents/{id=**}" - body: "*" - } - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/documents/{id=**}" - }; - } - rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { - option (google.api.http) = { - put: "/v1/bulk" - body: "*" - }; - } - rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { - option (google.api.http) = { - delete: "/v1/bulk" - body: "*" - }; - } - rpc Search (SearchRequest) returns (SearchResponse) { - option (google.api.http) = { - post: "/v1/search" - body: "*" - }; - } -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message GetRequest { - string id = 1; -} - -message GetResponse { - google.protobuf.Any fields = 1; -} - -message IndexRequest { - string id = 1; - google.protobuf.Any fields = 2; -} - -message DeleteRequest { - string id = 1; -} - -message BulkIndexRequest { - repeated index.Document documents = 1; -} - -message BulkIndexResponse { - int32 count = 
1; -} - -message BulkDeleteRequest { - repeated string ids = 1; -} - -message BulkDeleteResponse { - int32 count = 1; -} - -message SearchRequest { - google.protobuf.Any search_request = 1; -} - -message SearchResponse { - google.protobuf.Any search_result = 1; -} diff --git a/protobuf/distribute/distribute.swagger.json b/protobuf/distribute/distribute.swagger.json deleted file mode 100644 index 8ddf64d..0000000 --- a/protobuf/distribute/distribute.swagger.json +++ /dev/null @@ -1,362 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/distribute/distribute.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/bulk": { - "delete": { - "operationId": "BulkDelete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeBulkDeleteResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeBulkDeleteRequest" - } - } - ], - "tags": [ - "Distribute" - ] - }, - "put": { - "operationId": "BulkIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeBulkIndexResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeBulkIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/documents": { - "put": { - "operationId": "Index", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/documents/{id}": { - "get": { - "operationId": "Get", - "responses": { - 
"200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeGetResponse" - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Distribute" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Distribute" - ] - }, - "put": { - "operationId": "Index2", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/search": { - "post": { - "operationId": "Search", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeSearchResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeSearchRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - } - }, - "definitions": { - "distributeBulkDeleteRequest": { - "type": "object", - "properties": { - "ids": { - "type": "array", - 
"items": { - "type": "string" - } - } - } - }, - "distributeBulkDeleteResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "distributeBulkIndexRequest": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/definitions/indexDocument" - } - } - } - }, - "distributeBulkIndexResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "distributeGetResponse": { - "type": "object", - "properties": { - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeIndexRequest": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeNodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "distributeNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/distributeNodeHealthCheckResponseState" - } - } - }, - "distributeNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "distributeSearchRequest": { - "type": "object", - "properties": { - "search_request": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeSearchResponse": { - "type": "object", - "properties": { - "search_result": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexDocument": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. 
This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." 
- } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - } - } -} diff --git a/protobuf/index.pb.go b/protobuf/index.pb.go new file mode 100644 index 0000000..cddc642 --- /dev/null +++ b/protobuf/index.pb.go @@ -0,0 +1,1913 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: protobuf/index.proto + +package protobuf + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Event_Type int32 + +const ( + Event_Unknown Event_Type = 0 + Event_Join Event_Type = 1 + Event_Leave Event_Type = 2 + Event_Set Event_Type = 3 + Event_Delete Event_Type = 4 + Event_BulkIndex Event_Type = 5 + Event_BulkDelete Event_Type = 6 +) + +var Event_Type_name = map[int32]string{ + 0: "Unknown", + 1: "Join", + 2: "Leave", + 3: "Set", + 4: "Delete", + 5: "BulkIndex", + 6: "BulkDelete", +} + +var Event_Type_value = map[string]int32{ + "Unknown": 0, + "Join": 1, + "Leave": 2, + "Set": 3, + "Delete": 4, + "BulkIndex": 5, + "BulkDelete": 6, +} + +func (x Event_Type) String() string { + return proto.EnumName(Event_Type_name, int32(x)) +} + +func (Event_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{23, 0} +} + +type LivenessCheckResponse struct { + Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LivenessCheckResponse) Reset() { *m = LivenessCheckResponse{} } +func (m *LivenessCheckResponse) String() string { return proto.CompactTextString(m) } +func (*LivenessCheckResponse) ProtoMessage() {} +func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{0} +} + +func (m *LivenessCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LivenessCheckResponse.Unmarshal(m, b) +} +func (m *LivenessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LivenessCheckResponse.Marshal(b, m, deterministic) +} +func (m *LivenessCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LivenessCheckResponse.Merge(m, src) +} +func (m *LivenessCheckResponse) XXX_Size() int { + return xxx_messageInfo_LivenessCheckResponse.Size(m) +} +func (m *LivenessCheckResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_LivenessCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessCheckResponse proto.InternalMessageInfo + +func (m *LivenessCheckResponse) GetAlive() bool { + if m != nil { + return m.Alive + } + return false +} + +type ReadinessCheckResponse struct { + Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadinessCheckResponse) Reset() { *m = ReadinessCheckResponse{} } +func (m *ReadinessCheckResponse) String() string { return proto.CompactTextString(m) } +func (*ReadinessCheckResponse) ProtoMessage() {} +func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{1} +} + +func (m *ReadinessCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadinessCheckResponse.Unmarshal(m, b) +} +func (m *ReadinessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadinessCheckResponse.Marshal(b, m, deterministic) +} +func (m *ReadinessCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadinessCheckResponse.Merge(m, src) +} +func (m *ReadinessCheckResponse) XXX_Size() int { + return xxx_messageInfo_ReadinessCheckResponse.Size(m) +} +func (m *ReadinessCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadinessCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadinessCheckResponse proto.InternalMessageInfo + +func (m *ReadinessCheckResponse) GetReady() bool { + if m != nil { + return m.Ready + } + return false +} + +type Metadata struct { + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{2} +} + +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetGrpcAddress() string { + if m != nil { + return m.GrpcAddress + } + return "" +} + +func (m *Metadata) GetHttpAddress() string { + if m != nil { + return m.HttpAddress + } + return "" +} + +type Node struct { + RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{3} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} 
+func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetRaftAddress() string { + if m != nil { + return m.RaftAddress + } + return "" +} + +func (m *Node) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Node) GetState() string { + if m != nil { + return m.State + } + return "" +} + +type Cluster struct { + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{4} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetNodes() map[string]*Node { + if m != nil { + return m.Nodes + } + return nil +} + +func (m *Cluster) GetLeader() string { + if m != nil { + return m.Leader 
+ } + return "" +} + +type JoinRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (m *JoinRequest) String() string { return proto.CompactTextString(m) } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{5} +} + +func (m *JoinRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_JoinRequest.Unmarshal(m, b) +} +func (m *JoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_JoinRequest.Marshal(b, m, deterministic) +} +func (m *JoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_JoinRequest.Merge(m, src) +} +func (m *JoinRequest) XXX_Size() int { + return xxx_messageInfo_JoinRequest.Size(m) +} +func (m *JoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_JoinRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_JoinRequest proto.InternalMessageInfo + +func (m *JoinRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *JoinRequest) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +type LeaveRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (m *LeaveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{6} +} + +func (m *LeaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LeaveRequest.Unmarshal(m, 
b) +} +func (m *LeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LeaveRequest.Marshal(b, m, deterministic) +} +func (m *LeaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaveRequest.Merge(m, src) +} +func (m *LeaveRequest) XXX_Size() int { + return xxx_messageInfo_LeaveRequest.Size(m) +} +func (m *LeaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaveRequest proto.InternalMessageInfo + +func (m *LeaveRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type NodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeResponse) Reset() { *m = NodeResponse{} } +func (m *NodeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeResponse) ProtoMessage() {} +func (*NodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{7} +} + +func (m *NodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeResponse.Unmarshal(m, b) +} +func (m *NodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeResponse.Marshal(b, m, deterministic) +} +func (m *NodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResponse.Merge(m, src) +} +func (m *NodeResponse) XXX_Size() int { + return xxx_messageInfo_NodeResponse.Size(m) +} +func (m *NodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeResponse proto.InternalMessageInfo + +func (m *NodeResponse) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +type ClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterResponse) Reset() { *m = ClusterResponse{} } +func (m *ClusterResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterResponse) ProtoMessage() {} +func (*ClusterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{8} +} + +func (m *ClusterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterResponse.Unmarshal(m, b) +} +func (m *ClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterResponse.Marshal(b, m, deterministic) +} +func (m *ClusterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResponse.Merge(m, src) +} +func (m *ClusterResponse) XXX_Size() int { + return xxx_messageInfo_ClusterResponse.Size(m) +} +func (m *ClusterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResponse proto.InternalMessageInfo + +func (m *ClusterResponse) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type Document struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{9} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Document) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type GetRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{10} +} + +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type GetResponse struct { + Fields []byte `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) 
} +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{11} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type SetRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetRequest) Reset() { *m = SetRequest{} } +func (m *SetRequest) String() string { return proto.CompactTextString(m) } +func (*SetRequest) ProtoMessage() {} +func (*SetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{12} +} + +func (m *SetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRequest.Unmarshal(m, b) +} +func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) +} +func (m *SetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRequest.Merge(m, src) +} +func (m *SetRequest) XXX_Size() int { + return xxx_messageInfo_SetRequest.Size(m) +} +func (m *SetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SetRequest proto.InternalMessageInfo + +func (m *SetRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SetRequest) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type DeleteRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{13} +} + +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type BulkIndexRequest struct { + Requests []*SetRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_28043ab4bd817113, []int{14} +} + +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) +} +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) +} +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) +} +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) +} +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo + +func (m *BulkIndexRequest) GetRequests() []*SetRequest { + if m != nil { + return m.Requests + } + return nil +} + +type BulkIndexResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{15} +} + +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) +} +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) +} +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) +} +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) +} +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo + +func (m *BulkIndexResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type BulkDeleteRequest struct { + Requests []*DeleteRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{16} +} + +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) +} +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) +} +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) +} +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) +} +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo + +func (m *BulkDeleteRequest) GetRequests() []*DeleteRequest { + if m != nil { + return m.Requests + } + return nil +} + +type BulkDeleteResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() 
([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{17} +} + +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) +} +func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) +} +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) +} +func (m *BulkDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BulkDeleteResponse.Size(m) +} +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo + +func (m *BulkDeleteResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type SetMetadataRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMetadataRequest) Reset() { *m = SetMetadataRequest{} } +func (m *SetMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*SetMetadataRequest) ProtoMessage() {} +func (*SetMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{18} +} + +func (m *SetMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMetadataRequest.Unmarshal(m, b) +} +func (m *SetMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMetadataRequest.Marshal(b, m, deterministic) +} +func (m *SetMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMetadataRequest.Merge(m, src) +} +func (m *SetMetadataRequest) XXX_Size() int { + return 
xxx_messageInfo_SetMetadataRequest.Size(m) +} +func (m *SetMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMetadataRequest proto.InternalMessageInfo + +func (m *SetMetadataRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SetMetadataRequest) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type DeleteMetadataRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMetadataRequest) Reset() { *m = DeleteMetadataRequest{} } +func (m *DeleteMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetadataRequest) ProtoMessage() {} +func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{19} +} + +func (m *DeleteMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMetadataRequest.Unmarshal(m, b) +} +func (m *DeleteMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMetadataRequest.Marshal(b, m, deterministic) +} +func (m *DeleteMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMetadataRequest.Merge(m, src) +} +func (m *DeleteMetadataRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMetadataRequest.Size(m) +} +func (m *DeleteMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMetadataRequest proto.InternalMessageInfo + +func (m *DeleteMetadataRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type SearchRequest struct { + SearchRequest []byte `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{20} +} + +func (m *SearchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchRequest.Unmarshal(m, b) +} +func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) +} +func (m *SearchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchRequest.Merge(m, src) +} +func (m *SearchRequest) XXX_Size() int { + return xxx_messageInfo_SearchRequest.Size(m) +} +func (m *SearchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchRequest proto.InternalMessageInfo + +func (m *SearchRequest) GetSearchRequest() []byte { + if m != nil { + return m.SearchRequest + } + return nil +} + +type SearchResponse struct { + SearchResult []byte `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{21} +} + +func (m *SearchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchResponse.Unmarshal(m, b) +} +func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) +} +func (m *SearchResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchResponse.Merge(m, src) +} +func (m *SearchResponse) XXX_Size() int { + return xxx_messageInfo_SearchResponse.Size(m) +} +func (m *SearchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchResponse proto.InternalMessageInfo + +func (m *SearchResponse) GetSearchResult() []byte { + if m != nil { + return m.SearchResult + } + return nil +} + +type MappingResponse struct { + Mapping []byte `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MappingResponse) Reset() { *m = MappingResponse{} } +func (m *MappingResponse) String() string { return proto.CompactTextString(m) } +func (*MappingResponse) ProtoMessage() {} +func (*MappingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{22} +} + +func (m *MappingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MappingResponse.Unmarshal(m, b) +} +func (m *MappingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MappingResponse.Marshal(b, m, deterministic) +} +func (m *MappingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MappingResponse.Merge(m, src) +} +func (m *MappingResponse) XXX_Size() int { + return xxx_messageInfo_MappingResponse.Size(m) +} +func (m *MappingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MappingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MappingResponse proto.InternalMessageInfo + +func (m *MappingResponse) GetMapping() []byte { + if m != nil { + return m.Mapping + } + return nil +} + +type Event struct { + Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=index.Event_Type" json:"type,omitempty"` + Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{23} +} + +func (m *Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Event.Unmarshal(m, b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return xxx_messageInfo_Event.Size(m) +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() Event_Type { + if m != nil { + return m.Type + } + return Event_Unknown +} + +func (m *Event) GetData() *any.Any { + if m != nil { + return m.Data + } + return nil +} + +type WatchResponse struct { + Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{24} +} + +func (m *WatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchResponse.Unmarshal(m, b) +} +func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) +} +func (m *WatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchResponse.Merge(m, src) +} +func (m *WatchResponse) 
XXX_Size() int { + return xxx_messageInfo_WatchResponse.Size(m) +} +func (m *WatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchResponse proto.InternalMessageInfo + +func (m *WatchResponse) GetEvent() *Event { + if m != nil { + return m.Event + } + return nil +} + +type MetricsResponse struct { + Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } +func (m *MetricsResponse) String() string { return proto.CompactTextString(m) } +func (*MetricsResponse) ProtoMessage() {} +func (*MetricsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{25} +} + +func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricsResponse.Unmarshal(m, b) +} +func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) +} +func (m *MetricsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsResponse.Merge(m, src) +} +func (m *MetricsResponse) XXX_Size() int { + return xxx_messageInfo_MetricsResponse.Size(m) +} +func (m *MetricsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo + +func (m *MetricsResponse) GetMetrics() []byte { + if m != nil { + return m.Metrics + } + return nil +} + +func init() { + proto.RegisterEnum("index.Event_Type", Event_Type_name, Event_Type_value) + proto.RegisterType((*LivenessCheckResponse)(nil), "index.LivenessCheckResponse") + proto.RegisterType((*ReadinessCheckResponse)(nil), "index.ReadinessCheckResponse") + proto.RegisterType((*Metadata)(nil), "index.Metadata") + proto.RegisterType((*Node)(nil), "index.Node") + 
proto.RegisterType((*Cluster)(nil), "index.Cluster") + proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") + proto.RegisterType((*JoinRequest)(nil), "index.JoinRequest") + proto.RegisterType((*LeaveRequest)(nil), "index.LeaveRequest") + proto.RegisterType((*NodeResponse)(nil), "index.NodeResponse") + proto.RegisterType((*ClusterResponse)(nil), "index.ClusterResponse") + proto.RegisterType((*Document)(nil), "index.Document") + proto.RegisterType((*GetRequest)(nil), "index.GetRequest") + proto.RegisterType((*GetResponse)(nil), "index.GetResponse") + proto.RegisterType((*SetRequest)(nil), "index.SetRequest") + proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") + proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") + proto.RegisterType((*SetMetadataRequest)(nil), "index.SetMetadataRequest") + proto.RegisterType((*DeleteMetadataRequest)(nil), "index.DeleteMetadataRequest") + proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") + proto.RegisterType((*MappingResponse)(nil), "index.MappingResponse") + proto.RegisterType((*Event)(nil), "index.Event") + proto.RegisterType((*WatchResponse)(nil), "index.WatchResponse") + proto.RegisterType((*MetricsResponse)(nil), "index.MetricsResponse") +} + +func init() { proto.RegisterFile("protobuf/index.proto", fileDescriptor_28043ab4bd817113) } + +var fileDescriptor_28043ab4bd817113 = []byte{ + // 1166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0xed, 0x6e, 0x1b, 0x45, + 0x17, 0xae, 0xbf, 0xdd, 0xe3, 0x8f, 0xb8, 0xa7, 0x76, 0xea, 0x6c, 0xd3, 0x36, 0x9d, 0x57, 0xd1, + 0x1b, 0x5c, 0x62, 0x97, 0x14, 0x10, 0x04, 
0x81, 0x94, 0xb6, 0x56, 0x05, 0xa4, 0x51, 0xd9, 0x50, + 0x81, 0x00, 0x29, 0x9a, 0x78, 0x27, 0xce, 0x2a, 0xeb, 0xdd, 0x65, 0x77, 0xec, 0x62, 0xa1, 0xfe, + 0xe1, 0x16, 0xf8, 0xc5, 0x85, 0xc0, 0x8d, 0x70, 0x0b, 0x5c, 0x08, 0x9a, 0x8f, 0x5d, 0xef, 0xda, + 0xde, 0x46, 0xfc, 0xf2, 0xce, 0x9c, 0x67, 0x9e, 0xf3, 0xcc, 0x99, 0x99, 0xe7, 0xc8, 0xd0, 0xf6, + 0x03, 0x8f, 0x7b, 0xe7, 0xd3, 0x8b, 0x81, 0xed, 0x5a, 0xec, 0x97, 0xbe, 0x1c, 0x62, 0x49, 0x0e, + 0x8c, 0xad, 0xb1, 0xe7, 0x8d, 0x1d, 0x36, 0x88, 0x31, 0xd4, 0x9d, 0x2b, 0x84, 0x71, 0x77, 0x39, + 0xc4, 0x26, 0x3e, 0x8f, 0x82, 0xdb, 0x3a, 0x48, 0x7d, 0x7b, 0x40, 0x5d, 0xd7, 0xe3, 0x94, 0xdb, + 0x9e, 0x1b, 0xea, 0xe8, 0xfb, 0xf2, 0x67, 0xb4, 0x3f, 0x66, 0xee, 0x7e, 0xf8, 0x86, 0x8e, 0xc7, + 0x2c, 0x18, 0x78, 0xbe, 0x44, 0xac, 0xa2, 0xc9, 0x3e, 0x74, 0x8e, 0xed, 0x19, 0x73, 0x59, 0x18, + 0x3e, 0xbb, 0x64, 0xa3, 0x2b, 0x93, 0x85, 0xbe, 0xe7, 0x86, 0x0c, 0xdb, 0x50, 0xa2, 0x8e, 0x3d, + 0x63, 0xdd, 0xdc, 0x4e, 0x6e, 0xaf, 0x6a, 0xaa, 0x01, 0xe9, 0xc3, 0xa6, 0xc9, 0xa8, 0x65, 0xaf, + 0xc5, 0x07, 0x8c, 0x5a, 0xf3, 0x08, 0x2f, 0x07, 0xe4, 0x15, 0x54, 0x5f, 0x32, 0x4e, 0x2d, 0xca, + 0x29, 0x3e, 0x84, 0xfa, 0x38, 0xf0, 0x47, 0x67, 0xd4, 0xb2, 0x02, 0x16, 0x86, 0x12, 0x78, 0xd3, + 0xac, 0x89, 0xb9, 0x23, 0x35, 0x25, 0x20, 0x97, 0x9c, 0xfb, 0x31, 0x24, 0xaf, 0x20, 0x62, 0x4e, + 0x43, 0x88, 0x03, 0xc5, 0x13, 0xcf, 0x62, 0x02, 0x1a, 0xd0, 0x0b, 0xbe, 0xcc, 0x26, 0xe6, 0x22, + 0xb6, 0x47, 0x50, 0x9d, 0xe8, 0xe4, 0x92, 0xa9, 0x76, 0xb0, 0xd1, 0x57, 0xc7, 0x10, 0x69, 0x32, + 0x63, 0x80, 0xd0, 0x1f, 0x72, 0xca, 0x59, 0xb7, 0x20, 0x89, 0xd4, 0x80, 0xfc, 0x91, 0x83, 0xca, + 0x33, 0x67, 0x1a, 0x72, 0x16, 0xe0, 0x00, 0x4a, 0xae, 0x67, 0x31, 0x91, 0xaa, 0xb0, 0x57, 0x3b, + 0xd8, 0xd2, 0x5c, 0x3a, 0xdc, 0x17, 0xaa, 0xc2, 0xa1, 0xcb, 0x83, 0xb9, 0xa9, 0x70, 0xb8, 0x09, + 0x65, 0x87, 0x51, 0x8b, 0x05, 0x7a, 0x1f, 0x7a, 0x64, 0x0c, 0x01, 0x16, 0x60, 0x6c, 0x41, 0xe1, + 0x8a, 0xcd, 0xb5, 0x7e, 0xf1, 0x89, 0x0f, 0xa1, 0x34, 0xa3, 0xce, 0x94, 0x69, 
0xd1, 0x35, 0x9d, + 0x48, 0xac, 0x31, 0x55, 0xe4, 0x30, 0xff, 0x49, 0x8e, 0x7c, 0x01, 0xb5, 0xaf, 0x3c, 0xdb, 0x35, + 0xd9, 0xcf, 0x53, 0x16, 0x72, 0x6c, 0x42, 0xde, 0xb6, 0x34, 0x4d, 0xde, 0xb6, 0xf0, 0x01, 0x14, + 0x85, 0x8c, 0x75, 0x24, 0x32, 0x40, 0xee, 0x43, 0xfd, 0x98, 0xd1, 0x19, 0xcb, 0x20, 0x20, 0x03, + 0xa8, 0x4b, 0x74, 0x74, 0xc2, 0x11, 0x61, 0x2e, 0x8b, 0xf0, 0x33, 0xd8, 0xd0, 0xc5, 0x88, 0xd7, + 0xec, 0x41, 0x65, 0xa4, 0xa6, 0xf4, 0xb2, 0x66, 0xba, 0x6a, 0x66, 0x14, 0x26, 0x07, 0x50, 0x7d, + 0xee, 0x8d, 0xa6, 0x13, 0xe6, 0xae, 0x6e, 0x65, 0x13, 0xca, 0x17, 0x36, 0x73, 0x2c, 0x75, 0x21, + 0xea, 0xa6, 0x1e, 0x91, 0x6d, 0x80, 0x17, 0x8c, 0x67, 0xe9, 0xdf, 0x85, 0x9a, 0x8c, 0x6a, 0x29, + 0x0b, 0x92, 0x5c, 0x8a, 0xe4, 0x43, 0x80, 0xd3, 0x4c, 0x92, 0xcc, 0xd4, 0x0f, 0xa0, 0xf1, 0x9c, + 0x39, 0x8c, 0x67, 0x56, 0xef, 0x08, 0x5a, 0x4f, 0xa7, 0xce, 0xd5, 0x97, 0x62, 0xb7, 0x11, 0x66, + 0x1f, 0xaa, 0x81, 0xfa, 0x8c, 0x2e, 0xd1, 0x2d, 0x5d, 0x8e, 0x85, 0x02, 0x33, 0x86, 0x90, 0xf7, + 0xe0, 0x56, 0x82, 0x62, 0xf1, 0xce, 0x46, 0xde, 0xd4, 0xe5, 0x32, 0x55, 0xc9, 0x54, 0x03, 0x32, + 0x54, 0xd0, 0xb4, 0xa4, 0xc7, 0x2b, 0xe9, 0xda, 0x3a, 0x5d, 0x0a, 0x97, 0xc8, 0xd8, 0x03, 0x4c, + 0xd2, 0xbc, 0x33, 0xe5, 0x37, 0x80, 0xa7, 0x8c, 0xc7, 0x2f, 0x29, 0xa3, 0x7e, 0xff, 0xe5, 0x0d, + 0x92, 0xff, 0x43, 0x47, 0xa5, 0xbe, 0x86, 0x95, 0x7c, 0x0c, 0x8d, 0x53, 0x46, 0x83, 0xd1, 0x65, + 0x04, 0xd8, 0x85, 0x66, 0x28, 0x27, 0xce, 0xf4, 0x5e, 0xf4, 0x21, 0x37, 0xc2, 0x24, 0x8c, 0x7c, + 0x04, 0xcd, 0x68, 0x9d, 0xde, 0xdb, 0xff, 0xa0, 0x11, 0x2f, 0x0c, 0xa7, 0x4e, 0xb4, 0xae, 0x1e, + 0xad, 0x13, 0x73, 0xe4, 0x11, 0x6c, 0xbc, 0xa4, 0xbe, 0x6f, 0xbb, 0xe3, 0x78, 0x5d, 0x17, 0x2a, + 0x13, 0x35, 0xa5, 0x57, 0x44, 0x43, 0xf2, 0x57, 0x0e, 0x4a, 0xc3, 0x99, 0xb8, 0xc6, 0xbb, 0x50, + 0xe4, 0x73, 0x5f, 0x3d, 0x98, 0x66, 0x7c, 0xd4, 0x32, 0xd6, 0xff, 0x76, 0xee, 0x33, 0x53, 0x86, + 0x71, 0x0f, 0x8a, 0x89, 0xf2, 0xb4, 0xfb, 0xca, 0xdd, 0xfb, 0x91, 0xf5, 0xf7, 0x8f, 0xdc, 0xb9, + 0x29, 0x11, 0xe4, 
0x27, 0x28, 0x8a, 0x75, 0x58, 0x83, 0xca, 0x6b, 0xf7, 0xca, 0xf5, 0xde, 0xb8, + 0xad, 0x1b, 0x58, 0x85, 0xa2, 0xb0, 0x81, 0x56, 0x0e, 0x6f, 0x42, 0x49, 0x3e, 0xe8, 0x56, 0x1e, + 0x2b, 0x50, 0x38, 0x65, 0xbc, 0x55, 0x40, 0x80, 0xb2, 0x2a, 0x69, 0xab, 0x88, 0x0d, 0xb8, 0x19, + 0xdf, 0xa7, 0x56, 0x09, 0x9b, 0x00, 0x8b, 0xc3, 0x6e, 0x95, 0xc9, 0x13, 0x68, 0x7c, 0x47, 0x79, + 0xa2, 0x36, 0x04, 0x4a, 0x4c, 0x88, 0xd5, 0x4f, 0xb7, 0x9e, 0xdc, 0x80, 0xa9, 0x42, 0xb2, 0x34, + 0x8c, 0x07, 0xf6, 0x28, 0x4c, 0x95, 0x46, 0x4d, 0xc5, 0xa5, 0x51, 0xc3, 0x83, 0x3f, 0x01, 0x4a, + 0x32, 0x3b, 0x52, 0x68, 0xa4, 0xda, 0x0e, 0x6e, 0xae, 0x6c, 0x7b, 0x28, 0x3a, 0x9e, 0xb1, 0xad, + 0x93, 0xae, 0x6d, 0x52, 0xc4, 0xf8, 0xed, 0xef, 0x7f, 0x7e, 0xcf, 0xb7, 0x11, 0x07, 0xb3, 0x0f, + 0x06, 0x8e, 0x86, 0x9c, 0x8d, 0x24, 0xa3, 0x05, 0xcd, 0x74, 0xab, 0xca, 0xcc, 0x71, 0x4f, 0xe7, + 0x58, 0xdf, 0xd9, 0xc8, 0x5d, 0x99, 0xa4, 0x83, 0xb7, 0x45, 0x92, 0x20, 0xc2, 0xe8, 0x2c, 0x43, + 0xdd, 0x8e, 0xb2, 0xb8, 0x6f, 0x27, 0x6d, 0x32, 0x62, 0x6c, 0x49, 0x46, 0xc0, 0xaa, 0x60, 0x14, + 0xd6, 0x89, 0xa6, 0x3a, 0x44, 0x44, 0x0d, 0x4f, 0x18, 0xbb, 0x91, 0x41, 0x4d, 0xee, 0x4b, 0x96, + 0xae, 0xd1, 0x12, 0x2c, 0xda, 0x46, 0x07, 0xbf, 0xda, 0xd6, 0xdb, 0x43, 0x69, 0xc7, 0x78, 0xb2, + 0x68, 0x5d, 0x59, 0xea, 0x36, 0x97, 0xdc, 0x38, 0x12, 0x78, 0x5b, 0x52, 0x37, 0xb0, 0x96, 0xa0, + 0xc6, 0x13, 0x7d, 0xbd, 0x30, 0xda, 0x53, 0xb2, 0x7b, 0x64, 0xaa, 0xec, 0x4a, 0x2a, 0xec, 0xad, + 0xa8, 0xc4, 0x57, 0x50, 0x3d, 0x75, 0xa9, 0x1f, 0x5e, 0x7a, 0xfc, 0x1d, 0x02, 0xd7, 0xb3, 0xb6, + 0x25, 0x6b, 0x13, 0xeb, 0x82, 0x35, 0x8c, 0x58, 0xbe, 0x4f, 0x5c, 0x70, 0xbc, 0xa3, 0x55, 0x2e, + 0xbb, 0xb0, 0xd1, 0x5d, 0x0d, 0xe8, 0x6d, 0x6b, 0xad, 0x46, 0x43, 0xb0, 0x5a, 0xba, 0x1b, 0x85, + 0x87, 0xb9, 0x1e, 0xfe, 0x98, 0x7c, 0x2b, 0x98, 0x64, 0x48, 0x59, 0xa9, 0xb1, 0xb5, 0x26, 0x92, + 0x26, 0xef, 0xad, 0x92, 0x7f, 0x0d, 0x85, 0x17, 0x8c, 0x63, 0x64, 0x10, 0x8b, 0x96, 0x66, 0x60, + 0x72, 0x4a, 0xf3, 0xdc, 0x93, 0x3c, 0x77, 0xb0, 0x93, 
0xe2, 0x11, 0x25, 0xfd, 0xbc, 0xd7, 0x7b, + 0x8b, 0xa6, 0x7c, 0xf9, 0xb8, 0xda, 0x58, 0x32, 0x6b, 0xb9, 0x23, 0x09, 0x0d, 0x63, 0x3d, 0xa1, + 0x10, 0xf8, 0x3a, 0x32, 0x11, 0x5c, 0xdb, 0x40, 0x32, 0x99, 0xb5, 0xd4, 0x5e, 0x86, 0xd4, 0x63, + 0x28, 0x2b, 0x37, 0x8e, 0x69, 0x53, 0xa6, 0x6e, 0x74, 0x96, 0x66, 0x75, 0x01, 0x3a, 0x92, 0x75, + 0x83, 0x80, 0x3c, 0x7b, 0x19, 0x13, 0x22, 0x4f, 0xa0, 0xa2, 0x4d, 0xfa, 0xda, 0xeb, 0xbe, 0x64, + 0xe6, 0xe9, 0xeb, 0xae, 0x7d, 0x1c, 0x3f, 0x85, 0x92, 0xb4, 0xc3, 0x4c, 0xb6, 0x48, 0x74, 0xca, + 0x34, 0xc9, 0x8d, 0xc7, 0x39, 0x29, 0x45, 0x59, 0xde, 0xf5, 0x52, 0xd2, 0xe6, 0xb9, 0x24, 0x45, + 0x05, 0x9f, 0x92, 0x1f, 0x76, 0xc6, 0x36, 0xbf, 0x9c, 0x9e, 0xf7, 0x47, 0xde, 0x64, 0x30, 0xf1, + 0xc2, 0xe9, 0x15, 0x1d, 0x9c, 0x3b, 0x34, 0xe4, 0xf1, 0x1f, 0x84, 0xf3, 0xb2, 0xfc, 0x7a, 0xf2, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xd3, 0x19, 0x60, 0x72, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IndexClient is the client API for Index service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type IndexClient interface { + LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) + ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) + Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) + Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) + Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) + Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) +} + +type indexClient struct { + cc *grpc.ClientConn +} + +func NewIndexClient(cc *grpc.ClientConn) IndexClient { + return &indexClient{cc} +} + +func (c *indexClient) LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) { + out := new(LivenessCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/LivenessCheck", in, out, 
opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) { + out := new(ReadinessCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/ReadinessCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) { + out := new(NodeResponse) + err := c.cc.Invoke(ctx, "/index.Index/Node", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Join", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) { + out := new(ClusterResponse) + err := c.cc.Invoke(ctx, "/index.Index/Cluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Leave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) { + out := new(MappingResponse) + err := c.cc.Invoke(ctx, "/index.Index/Mapping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/Watch", opts...) 
+ if err != nil { + return nil, err + } + x := &indexWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Index_WatchClient interface { + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type indexWatchClient struct { + grpc.ClientStream +} + +func (x *indexWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *indexClient) Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := c.cc.Invoke(ctx, "/index.Index/Metrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IndexServer is the server API for Index service. +type IndexServer interface { + LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error) + ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error) + Node(context.Context, *empty.Empty) (*NodeResponse, error) + Join(context.Context, *JoinRequest) (*empty.Empty, error) + Cluster(context.Context, *empty.Empty) (*ClusterResponse, error) + Leave(context.Context, *LeaveRequest) (*empty.Empty, error) + Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + Set(context.Context, *SetRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + Search(context.Context, *SearchRequest) (*SearchResponse, error) + Mapping(context.Context, *empty.Empty) (*MappingResponse, error) + Watch(*empty.Empty, Index_WatchServer) error + Metrics(context.Context, *empty.Empty) (*MetricsResponse, error) 
+} + +// UnimplementedIndexServer can be embedded to have forward compatible implementations. +type UnimplementedIndexServer struct { +} + +func (*UnimplementedIndexServer) LivenessCheck(ctx context.Context, req *empty.Empty) (*LivenessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented") +} +func (*UnimplementedIndexServer) ReadinessCheck(ctx context.Context, req *empty.Empty) (*ReadinessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadinessCheck not implemented") +} +func (*UnimplementedIndexServer) Node(ctx context.Context, req *empty.Empty) (*NodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Node not implemented") +} +func (*UnimplementedIndexServer) Join(ctx context.Context, req *JoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Join not implemented") +} +func (*UnimplementedIndexServer) Cluster(ctx context.Context, req *empty.Empty) (*ClusterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cluster not implemented") +} +func (*UnimplementedIndexServer) Leave(ctx context.Context, req *LeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") +} +func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} +func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedIndexServer) Get(ctx context.Context, req 
*GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedIndexServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedIndexServer) Mapping(ctx context.Context, req *empty.Empty) (*MappingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mapping not implemented") +} +func (*UnimplementedIndexServer) Watch(req *empty.Empty, srv Index_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (*UnimplementedIndexServer) Metrics(ctx context.Context, req *empty.Empty) (*MetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") +} + +func RegisterIndexServer(s *grpc.Server, srv IndexServer) { + s.RegisterService(&_Index_serviceDesc, srv) +} + +func _Index_LivenessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).LivenessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/LivenessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).LivenessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Index_ReadinessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).ReadinessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/ReadinessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).ReadinessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Node_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Node(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Node", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Node(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Cluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Cluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Cluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Cluster(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Snapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Snapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkIndex", + 
} + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Mapping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Mapping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Mapping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Mapping(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IndexServer).Watch(m, 
&indexWatchServer{stream}) +} + +type Index_WatchServer interface { + Send(*WatchResponse) error + grpc.ServerStream +} + +type indexWatchServer struct { + grpc.ServerStream +} + +func (x *indexWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Index_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Metrics(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Index_serviceDesc = grpc.ServiceDesc{ + ServiceName: "index.Index", + HandlerType: (*IndexServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LivenessCheck", + Handler: _Index_LivenessCheck_Handler, + }, + { + MethodName: "ReadinessCheck", + Handler: _Index_ReadinessCheck_Handler, + }, + { + MethodName: "Node", + Handler: _Index_Node_Handler, + }, + { + MethodName: "Join", + Handler: _Index_Join_Handler, + }, + { + MethodName: "Cluster", + Handler: _Index_Cluster_Handler, + }, + { + MethodName: "Leave", + Handler: _Index_Leave_Handler, + }, + { + MethodName: "Snapshot", + Handler: _Index_Snapshot_Handler, + }, + { + MethodName: "BulkIndex", + Handler: _Index_BulkIndex_Handler, + }, + { + MethodName: "BulkDelete", + Handler: _Index_BulkDelete_Handler, + }, + { + MethodName: "Get", + Handler: _Index_Get_Handler, + }, + { + MethodName: "Set", + Handler: _Index_Set_Handler, + }, + { + MethodName: "Delete", + Handler: _Index_Delete_Handler, + }, + { + MethodName: "Search", + Handler: _Index_Search_Handler, + }, + { + MethodName: "Mapping", + Handler: _Index_Mapping_Handler, + }, + { + 
MethodName: "Metrics", + Handler: _Index_Metrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Index_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "protobuf/index.proto", +} diff --git a/protobuf/index.pb.gw.go b/protobuf/index.pb.gw.go new file mode 100644 index 0000000..810b9ed --- /dev/null +++ b/protobuf/index.pb.gw.go @@ -0,0 +1,1276 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/index.proto + +/* +Package protobuf is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package protobuf + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.LivenessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.LivenessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func 
request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ReadinessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.ReadinessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Node(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Node(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Join(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Join(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata 
runtime.ServerMetadata + + msg, err := client.Cluster(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Cluster(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Leave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Leave(ctx, 
&protoReq) + return msg, metadata, err + +} + +func request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Snapshot(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if 
err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkIndex(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkDelete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = 
err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Get(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = 
runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Set(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + 
msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Delete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Search(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Mapping(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Mapping(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Metrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Metrics(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterIndexHandlerServer registers the http handlers for service Index to "mux". +// UnaryRPC :call IndexServer directly. 
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterIndexHandlerServer(ctx context.Context, mux *runtime.ServeMux, server IndexServer) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_LivenessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_ReadinessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Node_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Join_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Cluster_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Leave_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Snapshot_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkIndex_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkDelete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Get_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Set_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Delete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Search_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Mapping_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Metrics_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterIndexHandler(ctx, mux, conn) +} + +// RegisterIndexHandler registers the http handlers for service Index to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) +} + +// RegisterIndexHandlerClient registers the http handlers for service Index +// to "mux". 
The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "IndexClient" to call the correct interceptors. +func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_LivenessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_ReadinessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Node_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Join_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Cluster_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Leave_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Set_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Mapping_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Metrics_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Index_LivenessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "liveness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_ReadinessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "readiness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Node_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Join_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Cluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "cluster"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Leave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, 
[]string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Mapping_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "mapping"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Metrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Index_LivenessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_ReadinessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_Node_0 = runtime.ForwardResponseMessage + + forward_Index_Join_0 = runtime.ForwardResponseMessage + + forward_Index_Cluster_0 = runtime.ForwardResponseMessage + + forward_Index_Leave_0 = runtime.ForwardResponseMessage + + forward_Index_Snapshot_0 = runtime.ForwardResponseMessage + + forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Index_Get_0 = runtime.ForwardResponseMessage + + forward_Index_Set_0 = runtime.ForwardResponseMessage + + forward_Index_Delete_0 = runtime.ForwardResponseMessage + + forward_Index_Search_0 = runtime.ForwardResponseMessage + + forward_Index_Mapping_0 = runtime.ForwardResponseMessage + + forward_Index_Metrics_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/index.proto b/protobuf/index.proto new file mode 100644 index 0000000..9eb168b --- /dev/null +++ b/protobuf/index.proto @@ -0,0 +1,223 @@ +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; +import "protoc-gen-swagger/options/annotations.proto"; + +package index; + +option go_package = "github.com/mosuka/blast/protobuf"; + +service Index { + rpc LivenessCheck (google.protobuf.Empty) returns (LivenessCheckResponse) { 
+ option (google.api.http) = { + get: "/v1/liveness_check" + }; + } + + rpc ReadinessCheck (google.protobuf.Empty) returns (ReadinessCheckResponse) { + option (google.api.http) = { + get: "/v1/readiness_check" + }; + } + + rpc Node (google.protobuf.Empty) returns (NodeResponse) { + option (google.api.http) = { + get: "/v1/node" + }; + } + rpc Join (JoinRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/cluster/{id}" + body: "node" + }; + } + rpc Cluster (google.protobuf.Empty) returns (ClusterResponse) { + option (google.api.http) = { + get: "/v1/cluster" + }; + } + rpc Leave (LeaveRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/cluster/{id}" + }; + } + + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/snapshot" + }; + } + + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/documents" + body: "*" + }; + } + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Set (SetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents/{id=**}" + body: "*" + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } + + rpc Mapping (google.protobuf.Empty) returns (MappingResponse) { + option (google.api.http) = { + get: "/v1/mapping" + }; + } + + rpc Watch (google.protobuf.Empty) returns (stream WatchResponse) {} + + rpc Metrics (google.protobuf.Empty) returns (MetricsResponse) { + option (google.api.http) = { + get: 
"/v1/metrics" + }; + } +} + +message LivenessCheckResponse { + bool alive = 1; +} + +message ReadinessCheckResponse { + bool ready = 1; +} + +message Metadata { + string grpc_address = 1; + string http_address = 2; +} + +message Node { + string raft_address = 1; + Metadata metadata = 2; + string state = 3; +} + +message Cluster { + map nodes = 1; + string leader = 2; +} + +message JoinRequest { + string id = 1; + Node node = 2; +} + +message LeaveRequest { + string id = 1; +} + +message NodeResponse { + Node node = 1; +} + +message ClusterResponse { + Cluster cluster = 1; +} + +message Document { + string id = 1; + bytes fields = 2; +} + +message GetRequest { + string id = 1; +} + +message GetResponse { + bytes fields = 1; +} + +message SetRequest { + string id = 1; + bytes fields = 2; +} + +message DeleteRequest { + string id = 1; +} + +message BulkIndexRequest { + repeated SetRequest requests = 1; +} + +message BulkIndexResponse { + int32 count = 1; +} + +message BulkDeleteRequest { + repeated DeleteRequest requests = 1; +} + +message BulkDeleteResponse { + int32 count = 1; +} + +message SetMetadataRequest { + string id = 1; + Metadata metadata = 2; +} + +message DeleteMetadataRequest { + string id = 1; +} + +message SearchRequest { + bytes search_request = 1; +} + +message SearchResponse { + bytes search_result = 1; +} + +message MappingResponse { + bytes mapping = 1; +} + +message Event { + enum Type { + Unknown = 0; + Join = 1; + Leave = 2; + Set = 3; + Delete = 4; + BulkIndex = 5; + BulkDelete = 6; + } + Type type = 1; + google.protobuf.Any data = 2; +} + +message WatchResponse { + Event event = 1; +} + +message MetricsResponse { + bytes metrics = 1; +} diff --git a/protobuf/index/index.go b/protobuf/index/index.go deleted file mode 100644 index 31a3023..0000000 --- a/protobuf/index/index.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "encoding/json" - "errors" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/protobuf" -) - -func MarshalDocument(doc *Document) ([]byte, error) { - if doc == nil { - return nil, errors.New("nil") - } - - fieldsIntr, err := protobuf.MarshalAny(doc.Fields) - if err != nil { - return nil, err - } - - docMap := map[string]interface{}{ - "id": doc.Id, - "fields": *fieldsIntr.(*map[string]interface{}), - } - - docBytes, err := json.Marshal(docMap) - if err != nil { - return nil, err - } - - return docBytes, nil -} - -func UnmarshalDocument(data []byte, doc *Document) error { - var err error - - if data == nil || len(data) <= 0 || doc == nil { - return nil - } - - var docMap map[string]interface{} - err = json.Unmarshal(data, &docMap) - if err != nil { - return err - } - - if id, ok := docMap["id"].(string); ok { - doc.Id = id - } - - if fieldsMap, ok := docMap["fields"].(map[string]interface{}); ok { - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - return err - } - doc.Fields = fieldsAny - } - - return nil -} diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go deleted file mode 100644 index b60dbee..0000000 --- a/protobuf/index/index.pb.go +++ /dev/null @@ -1,2051 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: protobuf/index/index.proto - -package index - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY 
NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1, 0} -} - -type Node_State int32 - -const ( - Node_UNKNOWN Node_State = 0 - Node_FOLLOWER Node_State = 1 - Node_CANDIDATE Node_State = 2 - Node_LEADER Node_State = 3 - Node_SHUTDOWN Node_State = 4 -) - -var Node_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "FOLLOWER", - 2: "CANDIDATE", - 3: "LEADER", - 4: "SHUTDOWN", -} - -var Node_State_value = map[string]int32{ - "UNKNOWN": 0, - "FOLLOWER": 1, - "CANDIDATE": 2, - "LEADER": 3, - "SHUTDOWN": 4, -} - -func (x Node_State) String() string { - return proto.EnumName(Node_State_name, int32(x)) -} - -func (Node_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{3, 0} -} - -type ClusterWatchResponse_Event int32 - -const ( - ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 - ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 - ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 - ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 -) - -var ClusterWatchResponse_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "JOIN", - 2: "LEAVE", - 3: "UPDATE", -} - 
-var ClusterWatchResponse_Event_value = map[string]int32{ - "UNKNOWN": 0, - "JOIN": 1, - "LEAVE": 2, - "UPDATE": 3, -} - -func (x ClusterWatchResponse_Event) String() string { - return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) -} - -func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9, 0} -} - -type Proposal_Event int32 - -const ( - Proposal_UNKNOWN Proposal_Event = 0 - Proposal_SET_NODE Proposal_Event = 1 - Proposal_DELETE_NODE Proposal_Event = 2 - Proposal_INDEX Proposal_Event = 3 - Proposal_DELETE Proposal_Event = 4 - Proposal_BULK_INDEX Proposal_Event = 5 - Proposal_BULK_DELETE Proposal_Event = 6 -) - -var Proposal_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET_NODE", - 2: "DELETE_NODE", - 3: "INDEX", - 4: "DELETE", - 5: "BULK_INDEX", - 6: "BULK_DELETE", -} - -var Proposal_Event_value = map[string]int32{ - "UNKNOWN": 0, - "SET_NODE": 1, - "DELETE_NODE": 2, - "INDEX": 3, - "DELETE": 4, - "BULK_INDEX": 5, - "BULK_DELETE": 6, -} - -func (x Proposal_Event) String() string { - return proto.EnumName(Proposal_Event_name, int32(x)) -} - -func (Proposal_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{24, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return 
xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type Metadata struct { - GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` - HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{2} -} - -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetGrpcAddress() string { - if m != nil { - return m.GrpcAddress - } - return "" -} - -func (m *Metadata) GetGrpcGatewayAddress() string { - if m != nil { - return m.GrpcGatewayAddress - } - return "" -} 
- -func (m *Metadata) GetHttpAddress() string { - if m != nil { - return m.HttpAddress - } - return "" -} - -type Node struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - BindAddress string `protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=index.Node_State" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{3} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Node) GetBindAddress() string { - if m != nil { - return m.BindAddress - } - return "" -} - -func (m *Node) GetState() Node_State { - if m != nil { - return m.State - } - return Node_UNKNOWN -} - -func (m *Node) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Cluster struct { - Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{4} -} - -func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) -} -func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) -} -func (m *Cluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cluster.Merge(m, src) -} -func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) -} -func (m *Cluster) XXX_DiscardUnknown() { - xxx_messageInfo_Cluster.DiscardUnknown(m) -} - -var xxx_messageInfo_Cluster proto.InternalMessageInfo - -func (m *Cluster) GetNodes() map[string]*Node { - if m != nil { - return m.Nodes - } - return nil -} - -type NodeInfoResponse struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } -func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeInfoResponse) ProtoMessage() {} -func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{5} -} - -func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) -} -func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) -} -func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_NodeInfoResponse.Merge(m, src) -} -func (m *NodeInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeInfoResponse.Size(m) -} -func (m *NodeInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo - -func (m *NodeInfoResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterJoinRequest struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } -func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterJoinRequest) ProtoMessage() {} -func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{6} -} - -func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) -} -func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) -} -func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterJoinRequest.Merge(m, src) -} -func (m *ClusterJoinRequest) XXX_Size() int { - return xxx_messageInfo_ClusterJoinRequest.Size(m) -} -func (m *ClusterJoinRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo - -func (m *ClusterJoinRequest) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterLeaveRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } -func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterLeaveRequest) ProtoMessage() {} -func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{7} -} - -func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) -} -func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) -} -func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) -} -func (m *ClusterLeaveRequest) XXX_Size() int { - return xxx_messageInfo_ClusterLeaveRequest.Size(m) -} -func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo - -func (m *ClusterLeaveRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type ClusterInfoResponse struct { - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } -func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterInfoResponse) ProtoMessage() {} -func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{8} -} - -func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) -} -func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) -} -func (m *ClusterInfoResponse) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterInfoResponse.Merge(m, src) -} -func (m *ClusterInfoResponse) XXX_Size() int { - return xxx_messageInfo_ClusterInfoResponse.Size(m) -} -func (m *ClusterInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo - -func (m *ClusterInfoResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type ClusterWatchResponse struct { - Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.ClusterWatchResponse_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } -func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterWatchResponse) ProtoMessage() {} -func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9} -} - -func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) -} -func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) -} -func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWatchResponse.Merge(m, src) -} -func (m *ClusterWatchResponse) XXX_Size() int { - return xxx_messageInfo_ClusterWatchResponse.Size(m) -} -func (m *ClusterWatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo - -func (m 
*ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { - if m != nil { - return m.Event - } - return ClusterWatchResponse_UNKNOWN -} - -func (m *ClusterWatchResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ClusterWatchResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type GetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{10} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type GetResponse struct { - // Document document = 1; - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexRequest) Reset() { *m = IndexRequest{} } -func (m *IndexRequest) String() string { return proto.CompactTextString(m) } -func (*IndexRequest) ProtoMessage() {} -func (*IndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} -} - -func (m *IndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexRequest.Unmarshal(m, b) -} -func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) -} -func (m *IndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexRequest.Merge(m, src) -} -func (m *IndexRequest) XXX_Size() int { - return xxx_messageInfo_IndexRequest.Size(m) -} -func (m *IndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexRequest 
proto.InternalMessageInfo - -func (m *IndexRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexRequest) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type DeleteRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type Document struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return 
fileDescriptor_7b2daf652facb3ae, []int{14} -} - -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} - -var xxx_messageInfo_Document proto.InternalMessageInfo - -func (m *Document) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Document) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type BulkIndexRequest struct { - Documents []*Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } -func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } -func (*BulkIndexRequest) ProtoMessage() {} -func (*BulkIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} -} - -func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) -} -func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) -} -func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexRequest.Merge(m, src) -} -func (m *BulkIndexRequest) XXX_Size() int { - return xxx_messageInfo_BulkIndexRequest.Size(m) -} -func (m *BulkIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) -} - -var 
xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo - -func (m *BulkIndexRequest) GetDocuments() []*Document { - if m != nil { - return m.Documents - } - return nil -} - -type BulkIndexResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } -func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } -func (*BulkIndexResponse) ProtoMessage() {} -func (*BulkIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} -} - -func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) -} -func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) -} -func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexResponse.Merge(m, src) -} -func (m *BulkIndexResponse) XXX_Size() int { - return xxx_messageInfo_BulkIndexResponse.Size(m) -} -func (m *BulkIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo - -func (m *BulkIndexResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type BulkDeleteRequest struct { - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } -func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteRequest) ProtoMessage() {} -func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_7b2daf652facb3ae, []int{17} -} - -func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) -} -func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) -} -func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteRequest.Merge(m, src) -} -func (m *BulkDeleteRequest) XXX_Size() int { - return xxx_messageInfo_BulkDeleteRequest.Size(m) -} -func (m *BulkDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo - -func (m *BulkDeleteRequest) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -type BulkDeleteResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } -func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteResponse) ProtoMessage() {} -func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} -} - -func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) -} -func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) -} -func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteResponse.Merge(m, src) -} -func (m *BulkDeleteResponse) XXX_Size() int { - return xxx_messageInfo_BulkDeleteResponse.Size(m) -} -func (m *BulkDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) -} - 
-var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo - -func (m *BulkDeleteResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type SearchRequest struct { - SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } -func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} -} - -func (m *SearchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) -} -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) -} -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo - -func (m *SearchRequest) GetSearchRequest() *any.Any { - if m != nil { - return m.SearchRequest - } - return nil -} - -type SearchResponse struct { - SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - 
return fileDescriptor_7b2daf652facb3ae, []int{20} -} - -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) -} -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) -} -func (m *SearchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SearchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo - -func (m *SearchResponse) GetSearchResult() *any.Any { - if m != nil { - return m.SearchResult - } - return nil -} - -type IndexConfig struct { - IndexMapping *any.Any `protobuf:"bytes,1,opt,name=index_mapping,json=indexMapping,proto3" json:"index_mapping,omitempty"` - IndexType string `protobuf:"bytes,2,opt,name=index_type,json=indexType,proto3" json:"index_type,omitempty"` - IndexStorageType string `protobuf:"bytes,3,opt,name=index_storage_type,json=indexStorageType,proto3" json:"index_storage_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexConfig) Reset() { *m = IndexConfig{} } -func (m *IndexConfig) String() string { return proto.CompactTextString(m) } -func (*IndexConfig) ProtoMessage() {} -func (*IndexConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{21} -} - -func (m *IndexConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexConfig.Unmarshal(m, b) -} -func (m *IndexConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexConfig.Marshal(b, m, deterministic) -} -func (m *IndexConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexConfig.Merge(m, src) -} -func (m 
*IndexConfig) XXX_Size() int { - return xxx_messageInfo_IndexConfig.Size(m) -} -func (m *IndexConfig) XXX_DiscardUnknown() { - xxx_messageInfo_IndexConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexConfig proto.InternalMessageInfo - -func (m *IndexConfig) GetIndexMapping() *any.Any { - if m != nil { - return m.IndexMapping - } - return nil -} - -func (m *IndexConfig) GetIndexType() string { - if m != nil { - return m.IndexType - } - return "" -} - -func (m *IndexConfig) GetIndexStorageType() string { - if m != nil { - return m.IndexStorageType - } - return "" -} - -type GetIndexConfigResponse struct { - IndexConfig *IndexConfig `protobuf:"bytes,1,opt,name=index_config,json=indexConfig,proto3" json:"index_config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} } -func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexConfigResponse) ProtoMessage() {} -func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{22} -} - -func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexConfigResponse.Unmarshal(m, b) -} -func (m *GetIndexConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexConfigResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexConfigResponse.Merge(m, src) -} -func (m *GetIndexConfigResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexConfigResponse.Size(m) -} -func (m *GetIndexConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexConfigResponse proto.InternalMessageInfo - -func (m *GetIndexConfigResponse) GetIndexConfig() *IndexConfig { - if m != 
nil { - return m.IndexConfig - } - return nil -} - -type GetIndexStatsResponse struct { - IndexStats *any.Any `protobuf:"bytes,1,opt,name=index_stats,json=indexStats,proto3" json:"index_stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } -func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexStatsResponse) ProtoMessage() {} -func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{23} -} - -func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexStatsResponse.Unmarshal(m, b) -} -func (m *GetIndexStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexStatsResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexStatsResponse.Merge(m, src) -} -func (m *GetIndexStatsResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexStatsResponse.Size(m) -} -func (m *GetIndexStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexStatsResponse proto.InternalMessageInfo - -func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { - if m != nil { - return m.IndexStats - } - return nil -} - -type Proposal struct { - Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.Proposal_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Document *Document `protobuf:"bytes,3,opt,name=document,proto3" json:"document,omitempty"` - Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` - Documents []*Document `protobuf:"bytes,5,rep,name=documents,proto3" json:"documents,omitempty"` - Ids []string 
`protobuf:"bytes,6,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Proposal) Reset() { *m = Proposal{} } -func (m *Proposal) String() string { return proto.CompactTextString(m) } -func (*Proposal) ProtoMessage() {} -func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{24} -} - -func (m *Proposal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Proposal.Unmarshal(m, b) -} -func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) -} -func (m *Proposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proposal.Merge(m, src) -} -func (m *Proposal) XXX_Size() int { - return xxx_messageInfo_Proposal.Size(m) -} -func (m *Proposal) XXX_DiscardUnknown() { - xxx_messageInfo_Proposal.DiscardUnknown(m) -} - -var xxx_messageInfo_Proposal proto.InternalMessageInfo - -func (m *Proposal) GetEvent() Proposal_Event { - if m != nil { - return m.Event - } - return Proposal_UNKNOWN -} - -func (m *Proposal) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *Proposal) GetDocument() *Document { - if m != nil { - return m.Document - } - return nil -} - -func (m *Proposal) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Proposal) GetDocuments() []*Document { - if m != nil { - return m.Documents - } - return nil -} - -func (m *Proposal) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -func init() { - proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) - 
proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) - proto.RegisterEnum("index.Proposal_Event", Proposal_Event_name, Proposal_Event_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") - proto.RegisterType((*Metadata)(nil), "index.Metadata") - proto.RegisterType((*Node)(nil), "index.Node") - proto.RegisterType((*Cluster)(nil), "index.Cluster") - proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") - proto.RegisterType((*NodeInfoResponse)(nil), "index.NodeInfoResponse") - proto.RegisterType((*ClusterJoinRequest)(nil), "index.ClusterJoinRequest") - proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") - proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") - proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") - proto.RegisterType((*GetRequest)(nil), "index.GetRequest") - proto.RegisterType((*GetResponse)(nil), "index.GetResponse") - proto.RegisterType((*IndexRequest)(nil), "index.IndexRequest") - proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") - proto.RegisterType((*Document)(nil), "index.Document") - proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") - proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") - proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") - proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") - proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") - proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") - proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") - proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") - 
proto.RegisterType((*Proposal)(nil), "index.Proposal") -} - -func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } - -var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1454 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xdf, 0x72, 0xda, 0xc6, - 0x17, 0xb6, 0x00, 0x61, 0x7c, 0x04, 0x58, 0xd9, 0x60, 0x3b, 0x51, 0xec, 0x5f, 0xe2, 0xfd, 0x35, - 0xad, 0x4b, 0x5a, 0x48, 0x9d, 0x66, 0xda, 0x38, 0xed, 0x74, 0xb0, 0x51, 0x6d, 0x62, 0x02, 0x19, - 0x81, 0x93, 0x26, 0x33, 0x1d, 0x46, 0xc0, 0x1a, 0x54, 0x63, 0x89, 0x22, 0xe1, 0x96, 0xe9, 0xf4, - 0xa2, 0x79, 0x85, 0x4e, 0xa7, 0x6f, 0xd2, 0x17, 0xe8, 0x13, 0x74, 0x7a, 0x9b, 0xcb, 0x3e, 0x48, - 0x67, 0xff, 0x48, 0x48, 0xd8, 0x90, 0x76, 0x7a, 0xe3, 0x61, 0xcf, 0xf9, 0xce, 0xb7, 0xdf, 0x39, - 0x3a, 0xbb, 0x67, 0x0d, 0xda, 0x70, 0xe4, 0x78, 0x4e, 0x7b, 0x7c, 0x5a, 0xb4, 0xec, 0x2e, 0xf9, - 0x9e, 0xff, 0x2d, 0x30, 0x23, 0x92, 0xd9, 0x42, 0xbb, 0xd9, 0x73, 0x9c, 0xde, 0x80, 0x14, 0x03, - 0xa4, 0x69, 0x4f, 0x38, 0x42, 0xbb, 0x35, 0xeb, 0x22, 0xe7, 0x43, 0xcf, 0x77, 0x6e, 0x0a, 0xa7, - 0x39, 0xb4, 0x8a, 0xa6, 0x6d, 0x3b, 0x9e, 0xe9, 0x59, 0x8e, 0xed, 0x72, 0x2f, 0xfe, 0x55, 0x82, - 0xf5, 0x9a, 0xd3, 0x25, 0x47, 0xc4, 0x1c, 0x78, 0xfd, 0x83, 0x3e, 0xe9, 0x9c, 0x19, 0xe4, 0xdb, - 0x31, 0x71, 0x3d, 0xf4, 0x08, 0xe4, 0xe1, 0xc8, 0x69, 0x93, 0x1b, 0xd2, 0x1d, 0x69, 0x27, 0xbb, - 0xfb, 0xff, 0x02, 0x17, 0x75, 0x35, 0xba, 0xf0, 0x8c, 0x42, 0x0d, 0x1e, 0x81, 0xf7, 0x41, 0x66, - 0x6b, 0xa4, 0xc0, 0xf2, 0x49, 0xed, 0xb8, 0x56, 0x7f, 0x51, 0x53, 0x97, 0xd0, 0x2a, 0x28, 0x47, - 0x7a, 0xa9, 0xda, 0x3c, 0xaa, 0xd4, 0xf4, 0x46, 0x43, 0x95, 0x50, 0x1a, 0x52, 0xd5, 0xca, 0x73, - 0x9d, 0xad, 0x62, 0x28, 0x03, 0x2b, 0x86, 0x5e, 0x2a, 0x73, 0x67, 0x1c, 0xff, 0x26, 0xc1, 0xc6, - 0xa5, 0xbd, 0xdc, 0xa1, 0x63, 0xbb, 0x04, 0xed, 0x81, 0xec, 0x7a, 0xa6, 0xe7, 0x4b, 0x7b, 0x67, - 0x9e, 0x34, 0x0e, 0x2f, 0x34, 0x28, 0xd6, 0xe0, 0x21, 0xb8, 0x05, 0x32, 
0x5b, 0x47, 0xb5, 0x29, - 0xb0, 0xcc, 0xb5, 0xbd, 0x54, 0x25, 0xaa, 0xe4, 0xa4, 0xe6, 0x2f, 0x63, 0x68, 0x05, 0xe4, 0x12, - 0xd5, 0xa9, 0xc6, 0x51, 0x0a, 0x12, 0x65, 0xbd, 0x54, 0x56, 0x13, 0xd4, 0x48, 0xd5, 0xbe, 0x54, - 0x65, 0x0a, 0xaf, 0xd5, 0x9b, 0x2d, 0xbe, 0x4c, 0xe2, 0xd7, 0x12, 0xa4, 0x9e, 0x12, 0xcf, 0xec, - 0x9a, 0x9e, 0x89, 0xb6, 0x21, 0xdd, 0x1b, 0x0d, 0x3b, 0x2d, 0xb3, 0xdb, 0x1d, 0x11, 0xd7, 0x65, - 0x82, 0x57, 0x0c, 0x85, 0xda, 0x4a, 0xdc, 0x84, 0xee, 0x43, 0x8e, 0x41, 0x7a, 0xa6, 0x47, 0xbe, - 0x33, 0x27, 0x01, 0x34, 0xc6, 0xa0, 0x88, 0xfa, 0x0e, 0xb9, 0xcb, 0x8f, 0xd8, 0x86, 0x74, 0xdf, - 0xf3, 0x86, 0x01, 0x32, 0xce, 0x49, 0xa9, 0x4d, 0x40, 0xf0, 0x1b, 0x09, 0x12, 0xb4, 0x1c, 0x28, - 0x0b, 0x31, 0xab, 0x2b, 0xb6, 0x8d, 0x59, 0x5d, 0x1a, 0xdb, 0xb6, 0xec, 0xee, 0xcc, 0x2e, 0x0a, - 0xb5, 0xf9, 0xf4, 0xef, 0xf9, 0xd5, 0x8d, 0xb3, 0xea, 0x5e, 0x0b, 0x55, 0x37, 0x52, 0x4a, 0x74, - 0x0f, 0x52, 0xe7, 0x22, 0xd1, 0x1b, 0x89, 0x3b, 0xd2, 0x8e, 0xb2, 0xbb, 0x2a, 0xb0, 0x7e, 0xfe, - 0x46, 0x00, 0xc0, 0xc7, 0x57, 0xd6, 0x3d, 0x0d, 0xa9, 0x2f, 0xeb, 0xd5, 0x6a, 0xfd, 0x85, 0x6e, - 0xf0, 0xc2, 0x1f, 0x94, 0x6a, 0xe5, 0x4a, 0xb9, 0xd4, 0xd4, 0xd5, 0x18, 0x02, 0x48, 0x56, 0xf5, - 0x52, 0x59, 0x37, 0xd4, 0x38, 0x05, 0x36, 0x8e, 0x4e, 0x9a, 0x65, 0x1a, 0x96, 0xc0, 0x3f, 0x49, - 0xb0, 0x7c, 0x30, 0x18, 0xbb, 0x1e, 0x19, 0xa1, 0x22, 0xc8, 0xb6, 0xd3, 0x25, 0xb4, 0xb6, 0xf1, - 0x1d, 0x65, 0xf7, 0xa6, 0x90, 0x20, 0xdc, 0x4c, 0xb6, 0xab, 0xdb, 0xde, 0x68, 0x62, 0x70, 0x9c, - 0xa6, 0x03, 0x4c, 0x8d, 0x48, 0x85, 0xf8, 0x19, 0x99, 0x88, 0x0a, 0xd1, 0x9f, 0x68, 0x1b, 0xe4, - 0x0b, 0x73, 0x30, 0x26, 0xac, 0x36, 0xca, 0xae, 0x12, 0xca, 0xdf, 0xe0, 0x9e, 0xbd, 0xd8, 0xa7, - 0x12, 0x7e, 0x00, 0x2a, 0x35, 0x55, 0xec, 0x53, 0x27, 0x68, 0xcc, 0xdb, 0x90, 0xa0, 0x7b, 0x30, - 0xb6, 0x99, 0x48, 0xe6, 0xc0, 0x0f, 0x01, 0x09, 0x61, 0x4f, 0x1c, 0xcb, 0xf6, 0x8f, 0xda, 0x5b, - 0xc3, 0xee, 0xc2, 0x75, 0x11, 0x56, 0x25, 0xe6, 0x05, 0xf1, 0xe3, 0x66, 0x3e, 0x2e, 0xfe, 0x22, - 0x80, 0x45, 
0x54, 0xed, 0xc0, 0x72, 0x87, 0x9b, 0xc5, 0x0e, 0xd9, 0x68, 0x8d, 0x0c, 0xdf, 0x8d, - 0xff, 0x90, 0x20, 0x27, 0x8c, 0x2f, 0x4c, 0xaf, 0xd3, 0x0f, 0x28, 0x3e, 0x01, 0x99, 0x5c, 0x10, - 0xdb, 0x13, 0x27, 0x6e, 0x3b, 0x4a, 0x10, 0xc1, 0x16, 0x74, 0x0a, 0x34, 0x38, 0x3e, 0x48, 0x2d, - 0x36, 0x27, 0xb5, 0xb0, 0xb8, 0xf8, 0x62, 0x71, 0x0f, 0x41, 0x66, 0xd4, 0xd1, 0x0e, 0x4a, 0x41, - 0xe2, 0x49, 0xbd, 0x52, 0x53, 0x25, 0x7a, 0x24, 0xab, 0x7a, 0xe9, 0xb9, 0xe8, 0x9c, 0x93, 0x67, - 0xac, 0x8b, 0xe2, 0x78, 0x13, 0xe0, 0x90, 0x78, 0xf3, 0x4a, 0xf6, 0x18, 0x14, 0xe6, 0x15, 0x79, - 0x7e, 0x00, 0xc9, 0x53, 0x8b, 0x0c, 0xba, 0xae, 0xa8, 0x54, 0xae, 0xc0, 0xaf, 0xcf, 0x82, 0x7f, - 0xb7, 0x16, 0x4a, 0xf6, 0xc4, 0x10, 0x18, 0x5c, 0x85, 0x74, 0x85, 0x6a, 0x9d, 0x43, 0x1e, 0x62, - 0x8b, 0xfd, 0x03, 0xb6, 0xdb, 0x90, 0x29, 0x93, 0x01, 0xf1, 0xe6, 0x7e, 0xde, 0x23, 0x48, 0x95, - 0x9d, 0xce, 0xf8, 0x9c, 0xd6, 0xe0, 0xbf, 0x6d, 0x55, 0x02, 0x75, 0x7f, 0x3c, 0x38, 0x8b, 0x88, - 0xff, 0x10, 0x56, 0xba, 0x82, 0xdd, 0x3f, 0x4b, 0xfe, 0x71, 0xf6, 0x77, 0x35, 0xa6, 0x08, 0xfc, - 0x3e, 0x5c, 0x0b, 0x51, 0x88, 0xf2, 0xe5, 0x40, 0xee, 0x38, 0x63, 0xd1, 0x26, 0xb2, 0xc1, 0x17, - 0xf8, 0x2e, 0x87, 0x46, 0x93, 0x53, 0x21, 0x6e, 0x75, 0xf9, 0x46, 0x2b, 0x06, 0xfd, 0x89, 0xf3, - 0x80, 0xc2, 0xb0, 0x85, 0x94, 0x55, 0xc8, 0x34, 0x88, 0x39, 0xa2, 0x5d, 0xc7, 0xe9, 0x1e, 0x43, - 0xd6, 0x65, 0x86, 0xd6, 0x88, 0x5b, 0x16, 0x7e, 0xc0, 0x8c, 0x1b, 0x0e, 0xc6, 0xc7, 0x90, 0xf5, - 0xd9, 0xc4, 0xae, 0x8f, 0x20, 0x13, 0xd0, 0xb9, 0xe3, 0xc1, 0x62, 0xb6, 0xb4, 0xcf, 0x46, 0x91, - 0xf8, 0x17, 0x09, 0x14, 0x56, 0x95, 0x03, 0xc7, 0x3e, 0xb5, 0x7a, 0x94, 0x8a, 0x55, 0xb1, 0x75, - 0x6e, 0x0e, 0x87, 0x96, 0xdd, 0x5b, 0x4c, 0xc5, 0xa0, 0x4f, 0x39, 0x12, 0x6d, 0x01, 0xf0, 0x50, - 0x6f, 0x32, 0x24, 0xe2, 0xaa, 0x5e, 0x61, 0x96, 0xe6, 0x64, 0x48, 0x9b, 0x15, 0x71, 0xb7, 0xeb, - 0x39, 0x23, 0xb3, 0x47, 0x38, 0x8c, 0x4f, 0x03, 0x95, 0x79, 0x1a, 0xdc, 0x41, 0xd1, 0xb8, 0x0e, - 0xeb, 0x87, 0xc4, 0x0b, 0x29, 0x0b, 0x92, 0x7d, 
0x08, 0x7c, 0xdb, 0x56, 0x87, 0xd9, 0x85, 0x40, - 0x24, 0x3e, 0x7e, 0x38, 0x42, 0xb1, 0xa6, 0x0b, 0x5c, 0x83, 0x35, 0x9f, 0x90, 0xde, 0xec, 0x6e, - 0x88, 0x4f, 0xf1, 0x75, 0x99, 0xde, 0xe2, 0x93, 0x04, 0x56, 0x10, 0x8e, 0x7f, 0x8f, 0x41, 0xea, - 0xd9, 0xc8, 0x19, 0x3a, 0xae, 0x39, 0x40, 0xf7, 0xa2, 0x17, 0xce, 0x9a, 0x10, 0xe3, 0xfb, 0xff, - 0xe5, 0x25, 0x73, 0x0f, 0x52, 0x7e, 0xe7, 0x8a, 0x5b, 0xe6, 0x52, 0x6b, 0x07, 0x00, 0x71, 0xb4, - 0x12, 0xc1, 0xd1, 0x8a, 0x1c, 0x0c, 0xf9, 0x6d, 0x07, 0xc3, 0x6f, 0xec, 0xe4, 0xb4, 0xb1, 0xcf, - 0xae, 0xbc, 0xb8, 0xe8, 0x44, 0xd3, 0x9b, 0xad, 0x5a, 0xbd, 0xac, 0xab, 0x12, 0x7d, 0x1c, 0x95, - 0xf5, 0xaa, 0xde, 0xd4, 0xb9, 0x81, 0xbd, 0x3a, 0x2a, 0xb5, 0xb2, 0xfe, 0x95, 0x1a, 0xa7, 0xb7, - 0x19, 0xf7, 0xa9, 0x09, 0x94, 0x05, 0xd8, 0x3f, 0xa9, 0x1e, 0xb7, 0xb8, 0x4f, 0xa6, 0x71, 0x6c, - 0x2d, 0x00, 0xc9, 0xdd, 0x37, 0x34, 0x90, 0x8a, 0x43, 0x36, 0xac, 0xce, 0xbc, 0x88, 0xd0, 0xd6, - 0xc2, 0x47, 0x9c, 0xf6, 0xbf, 0xc5, 0x0f, 0x29, 0xbc, 0xf9, 0xfa, 0xcf, 0xbf, 0x7e, 0x8e, 0xad, - 0xa3, 0x5c, 0xf1, 0xe2, 0xa3, 0x22, 0x2d, 0x6c, 0xb1, 0xcf, 0x50, 0x1d, 0x46, 0xde, 0x84, 0x94, - 0x3f, 0x10, 0xd1, 0xfa, 0xa5, 0xaf, 0xad, 0xd3, 0x37, 0xa9, 0xb6, 0x11, 0xda, 0x21, 0x3c, 0xa3, - 0xf0, 0x06, 0xa3, 0xbe, 0x86, 0x56, 0x03, 0x6a, 0xda, 0x3c, 0x63, 0x17, 0xed, 0x83, 0x12, 0x9a, - 0x98, 0x68, 0x66, 0xbc, 0x87, 0xa6, 0xa8, 0x36, 0x67, 0x4f, 0xbc, 0x84, 0xca, 0x90, 0x0e, 0x8f, - 0x4f, 0xa4, 0x45, 0x49, 0xc2, 0x33, 0x75, 0x01, 0xcb, 0xd7, 0x81, 0x92, 0x85, 0x29, 0xce, 0x90, - 0x47, 0xb2, 0xd4, 0x58, 0x96, 0x39, 0x84, 0x68, 0x96, 0x62, 0xae, 0xf9, 0x89, 0x1e, 0x06, 0x22, - 0xd9, 0x38, 0x9d, 0xcb, 0x7f, 0x6b, 0xc1, 0xec, 0xc5, 0x4b, 0xf7, 0x25, 0x74, 0x0c, 0xf1, 0x43, - 0xe2, 0x21, 0xff, 0xdd, 0x36, 0x1d, 0x7e, 0x1a, 0x0a, 0x9b, 0x44, 0xc4, 0x16, 0x93, 0xb4, 0x81, - 0xd6, 0xa8, 0xa4, 0xa0, 0x8b, 0x8b, 0x3f, 0x58, 0xdd, 0xcf, 0xf3, 0xf9, 0x1f, 0xd1, 0x37, 0x7e, - 0x37, 0x5d, 0x0f, 0x5f, 0x07, 0x6f, 0x2b, 0xd6, 0xc7, 0x8c, 0xb4, 0xa0, 0x65, 0x22, 
0xa4, 0x7b, - 0x52, 0xfe, 0x95, 0xa6, 0x5d, 0xbd, 0xd1, 0x9e, 0x94, 0x47, 0x27, 0x90, 0xe4, 0x97, 0x3f, 0xca, - 0xf9, 0xe7, 0x2b, 0x3c, 0x32, 0xe6, 0xee, 0x26, 0x52, 0xc8, 0xcf, 0x49, 0xa1, 0x01, 0x2b, 0xc1, - 0xa4, 0x42, 0x7e, 0x03, 0xce, 0x8e, 0x3f, 0xed, 0xc6, 0x65, 0x87, 0xa8, 0xd0, 0x75, 0x46, 0x9f, - 0xd1, 0x52, 0x94, 0xbe, 0x3d, 0x1e, 0x9c, 0x51, 0xad, 0xcf, 0x01, 0xa6, 0xc3, 0x0a, 0x85, 0x83, - 0xa3, 0x9a, 0x6f, 0x5e, 0xe1, 0x89, 0xf2, 0xe6, 0x23, 0xbc, 0x55, 0x48, 0xf2, 0x51, 0x14, 0xd4, - 0x20, 0x32, 0xe7, 0xb4, 0xb5, 0x19, 0xab, 0xe0, 0x5a, 0x63, 0x5c, 0xab, 0x18, 0x28, 0x17, 0x1f, - 0x47, 0x94, 0xad, 0x02, 0xd9, 0xe8, 0x9d, 0x3f, 0xb7, 0xab, 0xb6, 0xa6, 0xad, 0x71, 0xc5, 0x88, - 0xc0, 0x4b, 0xe8, 0x10, 0x32, 0x91, 0xdb, 0x7e, 0x2e, 0xd3, 0xe6, 0x0c, 0x53, 0x64, 0x36, 0xe0, - 0x25, 0xf4, 0x19, 0xa4, 0x1a, 0xb6, 0x39, 0x74, 0xfb, 0x8e, 0x37, 0x97, 0x63, 0xee, 0x21, 0xdc, - 0xdf, 0x79, 0xf5, 0x6e, 0xcf, 0xf2, 0xfa, 0xe3, 0x76, 0xa1, 0xe3, 0x9c, 0x17, 0xcf, 0x1d, 0x77, - 0x7c, 0x66, 0x16, 0xdb, 0x03, 0xd3, 0xf5, 0x8a, 0xd1, 0xff, 0xa1, 0xdb, 0x49, 0xb6, 0x7e, 0xf0, - 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x30, 0x9b, 0x20, 0x5c, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IndexClient is the client API for Index service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type IndexClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) - ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) - ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) - BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) - GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) - GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) -} - -type indexClient struct { - cc *grpc.ClientConn -} - -func NewIndexClient(cc *grpc.ClientConn) IndexClient { - return &indexClient{cc} -} - -func (c *indexClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, "/index.Index/NodeHealthCheck", 
in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { - out := new(NodeInfoResponse) - err := c.cc.Invoke(ctx, "/index.Index/NodeInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/ClusterJoin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/ClusterLeave", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { - out := new(ClusterInfoResponse) - err := c.cc.Invoke(ctx, "/index.Index/ClusterInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/ClusterWatch", opts...) 
- if err != nil { - return nil, err - } - x := &indexClusterWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Index_ClusterWatchClient interface { - Recv() (*ClusterWatchResponse, error) - grpc.ClientStream -} - -type indexClusterWatchClient struct { - grpc.ClientStream -} - -func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { - m := new(ClusterWatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Index", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { - out := new(BulkIndexResponse) - err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { - out := new(BulkDeleteResponse) - err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { - out := new(GetIndexConfigResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetIndexConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { - out := new(GetIndexStatsResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetIndexStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IndexServer is the server API for Index service. 
-type IndexServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) - ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) - ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) - ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) - ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error - Get(context.Context, *GetRequest) (*GetResponse, error) - Index(context.Context, *IndexRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) - BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) - Search(context.Context, *SearchRequest) (*SearchResponse, error) - GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) - GetIndexStats(context.Context, *empty.Empty) (*GetIndexStatsResponse, error) - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) -} - -// UnimplementedIndexServer can be embedded to have forward compatible implementations. 
-type UnimplementedIndexServer struct { -} - -func (*UnimplementedIndexServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedIndexServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") -} -func (*UnimplementedIndexServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") -} -func (*UnimplementedIndexServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") -} -func (*UnimplementedIndexServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") -} -func (*UnimplementedIndexServer) ClusterWatch(req *empty.Empty, srv Index_ClusterWatchServer) error { - return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") -} -func (*UnimplementedIndexServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedIndexServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") -} -func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") -} -func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") -} -func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") -} -func (*UnimplementedIndexServer) GetIndexConfig(ctx context.Context, req *empty.Empty) (*GetIndexConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIndexConfig not implemented") -} -func (*UnimplementedIndexServer) GetIndexStats(ctx context.Context, req *empty.Empty) (*GetIndexStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIndexStats not implemented") -} -func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} - -func RegisterIndexServer(s *grpc.Server, srv IndexServer) { - s.RegisterService(&_Index_serviceDesc, srv) -} - -func _Index_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).NodeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/NodeInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).NodeInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterJoinRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).ClusterJoin(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterJoin", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterLeaveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).ClusterLeave(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterLeave", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } 
- if interceptor == nil { - return srv.(IndexServer).ClusterInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IndexServer).ClusterWatch(m, &indexClusterWatchServer{stream}) -} - -type Index_ClusterWatchServer interface { - Send(*ClusterWatchResponse) error - grpc.ServerStream -} - -type indexClusterWatchServer struct { - grpc.ServerStream -} - -func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Index(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Index", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Index(ctx, 
req.(*IndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).BulkIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/BulkIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).BulkDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/BulkDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).GetIndexConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/GetIndexConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetIndexConfig(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).GetIndexStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/GetIndexStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetIndexStats(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } 
- if interceptor == nil { - return srv.(IndexServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Index_serviceDesc = grpc.ServiceDesc{ - ServiceName: "index.Index", - HandlerType: (*IndexServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Index_NodeHealthCheck_Handler, - }, - { - MethodName: "NodeInfo", - Handler: _Index_NodeInfo_Handler, - }, - { - MethodName: "ClusterJoin", - Handler: _Index_ClusterJoin_Handler, - }, - { - MethodName: "ClusterLeave", - Handler: _Index_ClusterLeave_Handler, - }, - { - MethodName: "ClusterInfo", - Handler: _Index_ClusterInfo_Handler, - }, - { - MethodName: "Get", - Handler: _Index_Get_Handler, - }, - { - MethodName: "Index", - Handler: _Index_Index_Handler, - }, - { - MethodName: "Delete", - Handler: _Index_Delete_Handler, - }, - { - MethodName: "BulkIndex", - Handler: _Index_BulkIndex_Handler, - }, - { - MethodName: "BulkDelete", - Handler: _Index_BulkDelete_Handler, - }, - { - MethodName: "Search", - Handler: _Index_Search_Handler, - }, - { - MethodName: "GetIndexConfig", - Handler: _Index_GetIndexConfig_Handler, - }, - { - MethodName: "GetIndexStats", - Handler: _Index_GetIndexStats_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Index_Snapshot_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ClusterWatch", - Handler: _Index_ClusterWatch_Handler, - ServerStreams: true, - }, - }, - Metadata: "protobuf/index/index.proto", -} diff --git a/protobuf/index/index.pb.gw.go b/protobuf/index/index.pb.gw.go deleted file mode 100644 index a54291a..0000000 --- a/protobuf/index/index.pb.gw.go +++ /dev/null @@ -1,510 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
-// source: protobuf/index/index.proto - -/* -Package index is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package index - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/empty" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Index_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Index_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Index_NodeHealthCheck_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_ClusterInfo_0(ctx context.Context, marshaler 
runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Index_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Index_1(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var 
protoReq BulkIndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkDeleteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SearchRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - 
return msg, metadata, err - -} - -// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterIndexHandler(ctx, mux, conn) -} - -// RegisterIndexHandler registers the http handlers for service Index to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) -} - -// RegisterIndexHandlerClient registers the http handlers for service Index -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "IndexClient" to call the correct interceptors. 
-func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { - - mux.Handle("GET", pattern_Index_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Index_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Index_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Index_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Index_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Index_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Index_1(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Index_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Index_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Index_NodeInfo_0 = runtime.ForwardResponseMessage - - forward_Index_ClusterInfo_0 = runtime.ForwardResponseMessage - - forward_Index_Get_0 = runtime.ForwardResponseMessage - - 
forward_Index_Index_0 = runtime.ForwardResponseMessage - - forward_Index_Index_1 = runtime.ForwardResponseMessage - - forward_Index_Delete_0 = runtime.ForwardResponseMessage - - forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage - - forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage - - forward_Index_Search_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto deleted file mode 100644 index 5dee6a8..0000000 --- a/protobuf/index/index.proto +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; - -package index; - -option go_package = "github.com/mosuka/blast/protobuf/index"; - -service Index { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { - option (google.api.http) = { - get: "/v1/node/status" - }; - } - - rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} - rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { - option (google.api.http) = { - get: "/v1/cluster/status" - }; - } - rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/documents/{id=**}" - }; - } - rpc Index (IndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/documents" - body: "*" - additional_bindings { - put: "/v1/documents/{id=**}" - body: "*" - } - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/documents/{id=**}" - }; - } - rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { - option (google.api.http) = { - put: "/v1/bulk" - body: "*" - }; - } - rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { - option (google.api.http) = { - delete: "/v1/bulk" - body: "*" - }; - } - rpc Search (SearchRequest) returns (SearchResponse) { - option (google.api.http) = { - post: "/v1/search" - body: "*" - }; - } - rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} - rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} - rpc Snapshot (google.protobuf.Empty) returns 
(google.protobuf.Empty) {} -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message Metadata { - string grpc_address = 1; - string grpc_gateway_address = 2; - string http_address = 3; -} - -message Node { - enum State { - UNKNOWN = 0; - FOLLOWER = 1; - CANDIDATE = 2; - LEADER = 3; - SHUTDOWN = 4; - } - string id = 1; - string bind_address = 2; - State state = 3; - Metadata metadata = 4; -} - -message Cluster { - map nodes = 1; -} - -message NodeInfoResponse { - Node node = 1; -} - -message ClusterJoinRequest { - Node node = 1; -} - -message ClusterLeaveRequest { - string id = 1; -} - -message ClusterInfoResponse { - Cluster cluster = 1; -} - -message ClusterWatchResponse { - enum Event { - UNKNOWN = 0; - JOIN = 1; - LEAVE = 2; - UPDATE = 3; - } - Event event = 1; - Node node = 2; - Cluster cluster = 3; -} - -message GetRequest { - string id = 1; -} - -message GetResponse { -// Document document = 1; - google.protobuf.Any fields = 1; -} - -message IndexRequest { - string id = 1; - google.protobuf.Any fields = 2; -} - -message DeleteRequest { - string id = 1; -} - -message Document { - string id = 1; - google.protobuf.Any fields = 2; -} - -message BulkIndexRequest { - repeated Document documents = 1; -} - -message BulkIndexResponse { - int32 count = 1; -} - -message BulkDeleteRequest { - repeated string ids = 1; -} - -message BulkDeleteResponse { - int32 count = 1; -} - -message SearchRequest { - google.protobuf.Any search_request = 1; -} - -message SearchResponse { - google.protobuf.Any search_result = 1; -} - -message IndexConfig { - google.protobuf.Any index_mapping = 1; - string index_type = 2; - string index_storage_type = 3; -} - -message GetIndexConfigResponse { - IndexConfig 
index_config = 1; -} - -message GetIndexStatsResponse { - google.protobuf.Any index_stats = 1; -} - -message Proposal { - enum Event { - UNKNOWN = 0; - SET_NODE = 1; - DELETE_NODE = 2; - INDEX = 3; - DELETE = 4; - BULK_INDEX = 5; - BULK_DELETE = 6; - } - Event event = 1; - Node node = 2; - Document document = 3; - string id = 4; - repeated Document documents = 5; - repeated string ids = 6; -} diff --git a/protobuf/index/index.swagger.json b/protobuf/index/index.swagger.json deleted file mode 100644 index 5d96593..0000000 --- a/protobuf/index/index.swagger.json +++ /dev/null @@ -1,557 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/index/index.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/bulk": { - "delete": { - "operationId": "BulkDelete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexBulkDeleteResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexBulkDeleteRequest" - } - } - ], - "tags": [ - "Index" - ] - }, - "put": { - "operationId": "BulkIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexBulkIndexResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexBulkIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/cluster/status": { - "get": { - "operationId": "ClusterInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexClusterInfoResponse" - } - } - }, - "tags": [ - "Index" - ] - } - }, - "/v1/documents": { - "put": { - "operationId": "Index", - "responses": { - "200": { - "description": "A successful response.", - 
"schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/documents/{id}": { - "get": { - "operationId": "Get", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexGetResponse" - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Index" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Index" - ] - }, - "put": { - "operationId": "Index2", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/node/status": { - "get": { - "operationId": "NodeInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexNodeInfoResponse" - } - } - }, - "tags": [ - "Index" - ] - } - }, - "/v1/search": { 
- "post": { - "operationId": "Search", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexSearchResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexSearchRequest" - } - } - ], - "tags": [ - "Index" - ] - } - } - }, - "definitions": { - "NodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "indexBulkDeleteRequest": { - "type": "object", - "properties": { - "ids": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "indexBulkDeleteResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "indexBulkIndexRequest": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/definitions/indexDocument" - } - } - } - }, - "indexBulkIndexResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "indexCluster": { - "type": "object", - "properties": { - "nodes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/indexNode" - } - } - } - }, - "indexClusterInfoResponse": { - "type": "object", - "properties": { - "cluster": { - "$ref": "#/definitions/indexCluster" - } - } - }, - "indexClusterWatchResponse": { - "type": "object", - "properties": { - "event": { - "$ref": "#/definitions/indexClusterWatchResponseEvent" - }, - "node": { - "$ref": "#/definitions/indexNode" - }, - "cluster": { - "$ref": "#/definitions/indexCluster" - } - } - }, - "indexClusterWatchResponseEvent": { - "type": "string", - "enum": [ - "UNKNOWN", - "JOIN", - "LEAVE", - "UPDATE" - ], - "default": "UNKNOWN" - }, - "indexDocument": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": 
"#/definitions/protobufAny" - } - } - }, - "indexGetIndexConfigResponse": { - "type": "object", - "properties": { - "index_config": { - "$ref": "#/definitions/indexIndexConfig" - } - } - }, - "indexGetIndexStatsResponse": { - "type": "object", - "properties": { - "index_stats": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexGetResponse": { - "type": "object", - "properties": { - "fields": { - "$ref": "#/definitions/protobufAny", - "title": "Document document = 1;" - } - } - }, - "indexIndexConfig": { - "type": "object", - "properties": { - "index_mapping": { - "$ref": "#/definitions/protobufAny" - }, - "index_type": { - "type": "string" - }, - "index_storage_type": { - "type": "string" - } - } - }, - "indexIndexRequest": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexMetadata": { - "type": "object", - "properties": { - "grpc_address": { - "type": "string" - }, - "grpc_gateway_address": { - "type": "string" - }, - "http_address": { - "type": "string" - } - } - }, - "indexNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "bind_address": { - "type": "string" - }, - "state": { - "$ref": "#/definitions/indexNodeState" - }, - "metadata": { - "$ref": "#/definitions/indexMetadata" - } - } - }, - "indexNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/indexNodeHealthCheckResponseState" - } - } - }, - "indexNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "indexNodeInfoResponse": { - "type": "object", - "properties": { - "node": { - "$ref": "#/definitions/indexNode" - } - } - }, - "indexNodeState": { - "type": "string", - "enum": [ - "UNKNOWN", - "FOLLOWER", - "CANDIDATE", - "LEADER", - "SHUTDOWN" - ], - "default": "UNKNOWN" - }, - 
"indexSearchRequest": { - "type": "object", - "properties": { - "search_request": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexSearchResponse": { - "type": "object", - "properties": { - "search_result": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." 
- } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - }, - "x-stream-definitions": { - "indexClusterWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/indexClusterWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of indexClusterWatchResponse" - } - } -} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go deleted file mode 100644 index 40577fb..0000000 --- a/protobuf/management/management.pb.go +++ /dev/null @@ -1,1649 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/management/management.proto - -package management - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return 
proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{1, 0} -} - -type Node_State int32 - -const ( - Node_UNKNOWN Node_State = 0 - Node_FOLLOWER Node_State = 1 - Node_CANDIDATE Node_State = 2 - Node_LEADER Node_State = 3 - Node_SHUTDOWN Node_State = 4 -) - -var Node_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "FOLLOWER", - 2: "CANDIDATE", - 3: "LEADER", - 4: "SHUTDOWN", -} - -var Node_State_value = map[string]int32{ - "UNKNOWN": 0, - "FOLLOWER": 1, - "CANDIDATE": 2, - "LEADER": 3, - "SHUTDOWN": 4, -} - -func (x Node_State) String() string { - return proto.EnumName(Node_State_name, int32(x)) -} - -func (Node_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{3, 0} -} - -type ClusterWatchResponse_Event int32 - -const ( - ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 - ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 - ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 - ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 -) - -var ClusterWatchResponse_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "JOIN", - 2: "LEAVE", - 3: "UPDATE", -} - -var ClusterWatchResponse_Event_value = map[string]int32{ - "UNKNOWN": 0, - "JOIN": 1, - "LEAVE": 2, - "UPDATE": 3, -} - -func (x ClusterWatchResponse_Event) String() string { - return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) -} - -func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9, 0} -} - -type WatchResponse_Command int32 - -const ( - WatchResponse_UNKNOWN WatchResponse_Command = 0 - WatchResponse_SET WatchResponse_Command = 1 - WatchResponse_DELETE WatchResponse_Command = 2 -) - -var WatchResponse_Command_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET", - 2: "DELETE", -} - -var WatchResponse_Command_value = map[string]int32{ 
- "UNKNOWN": 0, - "SET": 1, - "DELETE": 2, -} - -func (x WatchResponse_Command) String() string { - return proto.EnumName(WatchResponse_Command_name, int32(x)) -} - -func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{16, 0} -} - -type Proposal_Event int32 - -const ( - Proposal_UNKNOWN Proposal_Event = 0 - Proposal_SET_NODE Proposal_Event = 1 - Proposal_DELETE_NODE Proposal_Event = 2 - Proposal_SET_VALUE Proposal_Event = 3 - Proposal_DELETE_VALUE Proposal_Event = 4 -) - -var Proposal_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET_NODE", - 2: "DELETE_NODE", - 3: "SET_VALUE", - 4: "DELETE_VALUE", -} - -var Proposal_Event_value = map[string]int32{ - "UNKNOWN": 0, - "SET_NODE": 1, - "DELETE_NODE": 2, - "SET_VALUE": 3, - "DELETE_VALUE": 4, -} - -func (x Proposal_Event) String() string { - return proto.EnumName(Proposal_Event_name, int32(x)) -} - -func (Proposal_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{17, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=management.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m 
*NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) 
GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type Metadata struct { - GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` - HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{2} -} - -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetGrpcAddress() string { - if m != nil { - return m.GrpcAddress - } - return "" -} - -func (m *Metadata) GetGrpcGatewayAddress() string { - if m != nil { - return m.GrpcGatewayAddress - } - return "" -} - -func (m *Metadata) GetHttpAddress() string { - if m != nil { - return m.HttpAddress - } - return "" -} - -type Node struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - BindAddress string 
`protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=management.Node_State" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{3} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Node) GetBindAddress() string { - if m != nil { - return m.BindAddress - } - return "" -} - -func (m *Node) GetState() Node_State { - if m != nil { - return m.State - } - return Node_UNKNOWN -} - -func (m *Node) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Cluster struct { - Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return 
proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{4} -} - -func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) -} -func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) -} -func (m *Cluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cluster.Merge(m, src) -} -func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) -} -func (m *Cluster) XXX_DiscardUnknown() { - xxx_messageInfo_Cluster.DiscardUnknown(m) -} - -var xxx_messageInfo_Cluster proto.InternalMessageInfo - -func (m *Cluster) GetNodes() map[string]*Node { - if m != nil { - return m.Nodes - } - return nil -} - -type NodeInfoResponse struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } -func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeInfoResponse) ProtoMessage() {} -func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{5} -} - -func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) -} -func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) -} -func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeInfoResponse.Merge(m, src) -} -func (m *NodeInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeInfoResponse.Size(m) -} -func (m *NodeInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo - -func (m *NodeInfoResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterJoinRequest struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } -func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterJoinRequest) ProtoMessage() {} -func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{6} -} - -func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) -} -func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) -} -func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterJoinRequest.Merge(m, src) -} -func (m *ClusterJoinRequest) XXX_Size() int { - return xxx_messageInfo_ClusterJoinRequest.Size(m) -} -func (m *ClusterJoinRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo - -func (m *ClusterJoinRequest) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterLeaveRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } -func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterLeaveRequest) ProtoMessage() {} -func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_5e030ad796566078, []int{7} -} - -func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) -} -func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) -} -func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) -} -func (m *ClusterLeaveRequest) XXX_Size() int { - return xxx_messageInfo_ClusterLeaveRequest.Size(m) -} -func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo - -func (m *ClusterLeaveRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type ClusterInfoResponse struct { - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } -func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterInfoResponse) ProtoMessage() {} -func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{8} -} - -func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) -} -func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) -} -func (m *ClusterInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterInfoResponse.Merge(m, src) -} -func (m *ClusterInfoResponse) XXX_Size() int { - return xxx_messageInfo_ClusterInfoResponse.Size(m) -} -func (m *ClusterInfoResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo - -func (m *ClusterInfoResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type ClusterWatchResponse struct { - Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.ClusterWatchResponse_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } -func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterWatchResponse) ProtoMessage() {} -func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9} -} - -func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) -} -func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) -} -func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWatchResponse.Merge(m, src) -} -func (m *ClusterWatchResponse) XXX_Size() int { - return xxx_messageInfo_ClusterWatchResponse.Size(m) -} -func (m *ClusterWatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo - -func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { - if m != nil { - return m.Event - } - return ClusterWatchResponse_UNKNOWN -} - -func (m *ClusterWatchResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - 
-func (m *ClusterWatchResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} -} - -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyValue.Unmarshal(m, b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return xxx_messageInfo_KeyValue.Size(m) -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type GetRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type GetResponse struct { - // option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { - // json_schema: { - // required: ["value"] - // }, - // example: { - // value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." } }' - // } - // }; - // google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type SetRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetRequest) Reset() { *m = SetRequest{} } -func (m *SetRequest) String() string { return proto.CompactTextString(m) } -func (*SetRequest) ProtoMessage() {} -func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} -} - -func (m *SetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetRequest.Unmarshal(m, b) -} -func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) -} -func (m *SetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetRequest.Merge(m, src) -} -func (m *SetRequest) XXX_Size() int { - return xxx_messageInfo_SetRequest.Size(m) -} -func (m *SetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetRequest proto.InternalMessageInfo - -func (m *SetRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *SetRequest) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type DeleteRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) 
ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15} -} - -func (m *WatchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchRequest.Unmarshal(m, b) -} -func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) -} -func (m *WatchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchRequest.Merge(m, src) -} -func (m *WatchRequest) XXX_Size() int { - return xxx_messageInfo_WatchRequest.Size(m) -} -func (m *WatchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchRequest proto.InternalMessageInfo - -func (m 
*WatchRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchResponse struct { - Command WatchResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=management.WatchResponse_Command" json:"command,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{16} -} - -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchResponse.Unmarshal(m, b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) -} -func (m *WatchResponse) XXX_Size() int { - return xxx_messageInfo_WatchResponse.Size(m) -} -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo - -func (m *WatchResponse) GetCommand() WatchResponse_Command { - if m != nil { - return m.Command - } - return WatchResponse_UNKNOWN -} - -func (m *WatchResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *WatchResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type Proposal struct { - Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.Proposal_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" 
json:"node,omitempty"` - KeyValue *KeyValue `protobuf:"bytes,3,opt,name=key_value,json=keyValue,proto3" json:"key_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Proposal) Reset() { *m = Proposal{} } -func (m *Proposal) String() string { return proto.CompactTextString(m) } -func (*Proposal) ProtoMessage() {} -func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{17} -} - -func (m *Proposal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Proposal.Unmarshal(m, b) -} -func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) -} -func (m *Proposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proposal.Merge(m, src) -} -func (m *Proposal) XXX_Size() int { - return xxx_messageInfo_Proposal.Size(m) -} -func (m *Proposal) XXX_DiscardUnknown() { - xxx_messageInfo_Proposal.DiscardUnknown(m) -} - -var xxx_messageInfo_Proposal proto.InternalMessageInfo - -func (m *Proposal) GetEvent() Proposal_Event { - if m != nil { - return m.Event - } - return Proposal_UNKNOWN -} - -func (m *Proposal) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *Proposal) GetKeyValue() *KeyValue { - if m != nil { - return m.KeyValue - } - return nil -} - -func init() { - proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterEnum("management.Node_State", Node_State_name, Node_State_value) - proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) - proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, 
WatchResponse_Command_value) - proto.RegisterEnum("management.Proposal_Event", Proposal_Event_name, Proposal_Event_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") - proto.RegisterType((*Metadata)(nil), "management.Metadata") - proto.RegisterType((*Node)(nil), "management.Node") - proto.RegisterType((*Cluster)(nil), "management.Cluster") - proto.RegisterMapType((map[string]*Node)(nil), "management.Cluster.NodesEntry") - proto.RegisterType((*NodeInfoResponse)(nil), "management.NodeInfoResponse") - proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") - proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") - proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") - proto.RegisterType((*ClusterWatchResponse)(nil), "management.ClusterWatchResponse") - proto.RegisterType((*KeyValue)(nil), "management.KeyValue") - proto.RegisterType((*GetRequest)(nil), "management.GetRequest") - proto.RegisterType((*GetResponse)(nil), "management.GetResponse") - proto.RegisterType((*SetRequest)(nil), "management.SetRequest") - proto.RegisterType((*DeleteRequest)(nil), "management.DeleteRequest") - proto.RegisterType((*WatchRequest)(nil), "management.WatchRequest") - proto.RegisterType((*WatchResponse)(nil), "management.WatchResponse") - proto.RegisterType((*Proposal)(nil), "management.Proposal") -} - -func init() { - proto.RegisterFile("protobuf/management/management.proto", fileDescriptor_5e030ad796566078) -} - -var fileDescriptor_5e030ad796566078 = []byte{ - // 1213 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x72, 0xda, 0x56, - 0x14, 0x8e, 0x10, 0x18, 0x7c, 0x20, 0xb1, 0x72, 0xcd, 0xf8, 0x87, 0x7a, 0x52, 0x5b, 0x4d, 0x33, - 0xae, 0x13, 0x83, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x8c, 0x6a, 0x63, 
0x13, 0x70, 0x05, 0xb6, - 0xc7, 0xdd, 0x78, 0x2e, 0x70, 0x03, 0x0c, 0x20, 0x51, 0x74, 0x71, 0xca, 0x74, 0xba, 0xc9, 0xb6, - 0xcb, 0x6e, 0xfb, 0x1e, 0x5d, 0xe4, 0x31, 0xfa, 0x02, 0x5d, 0x74, 0xba, 0xe9, 0x4b, 0x74, 0xee, - 0x8f, 0x64, 0x09, 0x84, 0xed, 0x76, 0x65, 0xe9, 0x9c, 0xef, 0x7c, 0xe7, 0x3b, 0xe7, 0x1e, 0x9d, - 0x8b, 0xe1, 0xf1, 0x60, 0x68, 0x53, 0xbb, 0x3e, 0x7a, 0x9d, 0xeb, 0x63, 0x0b, 0xb7, 0x48, 0x9f, - 0x58, 0xd4, 0xf7, 0x98, 0xe5, 0x6e, 0x04, 0xd7, 0x96, 0xcc, 0x6a, 0xcb, 0xb6, 0x5b, 0x3d, 0x92, - 0xf3, 0x02, 0xb1, 0x35, 0x16, 0xb0, 0xcc, 0x7b, 0x93, 0x2e, 0xd2, 0x1f, 0x50, 0xd7, 0xb9, 0x26, - 0x9d, 0x78, 0xd0, 0xc9, 0x61, 0xcb, 0xb2, 0x29, 0xa6, 0x1d, 0xdb, 0x72, 0xa4, 0xf7, 0x19, 0xff, - 0xd3, 0xd8, 0x6e, 0x11, 0x6b, 0xdb, 0x79, 0x83, 0x5b, 0x2d, 0x32, 0xcc, 0xd9, 0x03, 0x8e, 0x98, - 0x46, 0xeb, 0xbf, 0x29, 0xb0, 0x54, 0xb6, 0x9b, 0xe4, 0x90, 0xe0, 0x1e, 0x6d, 0xef, 0xb7, 0x49, - 0xa3, 0x6b, 0x92, 0x1f, 0x46, 0xc4, 0xa1, 0xe8, 0x2b, 0x88, 0x0d, 0x86, 0x76, 0x9d, 0xac, 0x28, - 0xeb, 0xca, 0xe6, 0x83, 0xdd, 0xcd, 0xac, 0xaf, 0x98, 0xf0, 0x90, 0xec, 0x09, 0xc3, 0x9b, 0x22, - 0x4c, 0x7f, 0x09, 0x31, 0xfe, 0x8e, 0x92, 0x10, 0x3f, 0x2d, 0x1f, 0x97, 0x2b, 0xe7, 0x65, 0xed, - 0x1e, 0x5a, 0x80, 0xe4, 0xa1, 0x91, 0x2f, 0xd5, 0x0e, 0x8b, 0x65, 0xa3, 0x5a, 0xd5, 0x14, 0x94, - 0x82, 0x44, 0xa9, 0x78, 0x66, 0xf0, 0xb7, 0x08, 0xba, 0x0f, 0xf3, 0xa6, 0x91, 0x2f, 0x08, 0xa7, - 0xaa, 0xbf, 0x53, 0x60, 0x79, 0x2a, 0x97, 0x33, 0xb0, 0x2d, 0x87, 0xa0, 0xaf, 0x21, 0xe6, 0x50, - 0x4c, 0x5d, 0x7d, 0x1f, 0xdd, 0xa8, 0x4f, 0xc4, 0x64, 0xab, 0x2c, 0xc0, 0x14, 0x71, 0xfa, 0x25, - 0xc4, 0xf8, 0x7b, 0x50, 0x60, 0x12, 0xe2, 0x42, 0xe0, 0x85, 0xa6, 0x30, 0x39, 0xa7, 0x65, 0xf7, - 0x35, 0x82, 0xe6, 0x21, 0x96, 0x67, 0x62, 0x35, 0x15, 0x25, 0x20, 0x5a, 0x30, 0xf2, 0x05, 0x2d, - 0xca, 0x8c, 0x4c, 0xf2, 0x85, 0x16, 0x63, 0xf0, 0x72, 0xa5, 0x76, 0x29, 0x5e, 0xe7, 0xf4, 0xb7, - 0x0a, 0x24, 0x5e, 0x11, 0x8a, 0x9b, 0x98, 0x62, 0xb4, 0x01, 0xa9, 0xd6, 0x70, 0xd0, 0xb8, 0xc4, - 0xcd, 0xe6, 
0x90, 0x38, 0x0e, 0x57, 0x3d, 0x6f, 0x26, 0x99, 0x2d, 0x2f, 0x4c, 0x68, 0x07, 0xd2, - 0x1c, 0xd2, 0xc2, 0x94, 0xbc, 0xc1, 0x63, 0x0f, 0x1a, 0xe1, 0x50, 0xc4, 0x7c, 0x07, 0xc2, 0xe5, - 0x46, 0x6c, 0x40, 0xaa, 0x4d, 0xe9, 0xc0, 0x43, 0xaa, 0x82, 0x94, 0xd9, 0x24, 0x44, 0xff, 0x5b, - 0x81, 0x28, 0x6b, 0x07, 0x7a, 0x00, 0x91, 0x4e, 0x53, 0xa6, 0x8d, 0x74, 0x9a, 0x2c, 0xb6, 0xde, - 0xb1, 0x9a, 0x13, 0x59, 0x92, 0xcc, 0xe6, 0xd2, 0x3f, 0x73, 0x5b, 0xac, 0xf2, 0x16, 0x2f, 0x4d, - 0xb6, 0x38, 0xd0, 0x4f, 0xb4, 0x03, 0x89, 0xbe, 0xac, 0x76, 0x25, 0xba, 0xae, 0x6c, 0x26, 0x77, - 0xd3, 0xfe, 0x00, 0xb7, 0x13, 0xa6, 0x87, 0xd2, 0x8f, 0x43, 0x4f, 0x20, 0x05, 0x89, 0x6f, 0x2b, - 0xa5, 0x52, 0xe5, 0xdc, 0x30, 0xc5, 0x11, 0xec, 0xe7, 0xcb, 0x85, 0x62, 0x21, 0x5f, 0x33, 0xb4, - 0x08, 0x02, 0x98, 0x2b, 0x19, 0xf9, 0x82, 0x61, 0x6a, 0x2a, 0x03, 0x56, 0x0f, 0x4f, 0x6b, 0x05, - 0x16, 0x16, 0xd5, 0x7f, 0x51, 0x20, 0xbe, 0xdf, 0x1b, 0x39, 0x94, 0x0c, 0xd1, 0x27, 0x10, 0xb3, - 0xec, 0x26, 0x61, 0x5d, 0x56, 0x37, 0x93, 0xbb, 0x8f, 0xfc, 0x3a, 0x24, 0x86, 0x17, 0xe0, 0x18, - 0x16, 0x1d, 0x8e, 0x4d, 0x01, 0xce, 0x1c, 0x01, 0x5c, 0x1b, 0x91, 0x06, 0x6a, 0x97, 0x8c, 0x65, - 0xc3, 0xd8, 0x23, 0x7a, 0x02, 0xb1, 0x2b, 0xdc, 0x1b, 0x11, 0xde, 0xaa, 0xe4, 0xae, 0x36, 0xd9, - 0x0e, 0x53, 0xb8, 0xf7, 0x22, 0x2f, 0x14, 0xfd, 0x05, 0x68, 0xcc, 0x54, 0xb4, 0x5e, 0xdb, 0xde, - 0xc4, 0x3e, 0x86, 0x28, 0x4b, 0xc4, 0x29, 0xc3, 0xc2, 0xb9, 0x57, 0xdf, 0x03, 0x24, 0x25, 0x1e, - 0xd9, 0x1d, 0xcb, 0xfd, 0x1a, 0xef, 0x16, 0xfb, 0x21, 0x2c, 0xca, 0xd8, 0x12, 0xc1, 0x57, 0xc4, - 0x0d, 0x9e, 0x38, 0x7a, 0xbd, 0xe0, 0xc1, 0x02, 0xfa, 0xb6, 0x21, 0xde, 0x10, 0x66, 0x99, 0x66, - 0x31, 0xa4, 0x6f, 0xa6, 0x8b, 0xd1, 0xff, 0x54, 0x20, 0x2d, 0x8d, 0xe7, 0x98, 0x36, 0xda, 0x1e, - 0xcf, 0x17, 0x10, 0x23, 0x57, 0xc4, 0xa2, 0xf2, 0xcb, 0x7c, 0x12, 0xc2, 0x12, 0x08, 0xc8, 0x1a, - 0x0c, 0x6d, 0x8a, 0x20, 0xaf, 0xd2, 0xc8, 0x4d, 0x95, 0xfa, 0xb5, 0xaa, 0x77, 0xd0, 0xfa, 0x29, - 0xc4, 0x78, 0x92, 0xe0, 0xa4, 0x25, 0x20, 0x7a, 
0x54, 0x29, 0x96, 0x35, 0x85, 0x7d, 0xc4, 0x25, - 0x23, 0x7f, 0x26, 0x27, 0xec, 0xf4, 0x84, 0x4f, 0x9b, 0xaa, 0x1f, 0x42, 0xe2, 0x98, 0x8c, 0xcf, - 0xd8, 0xa9, 0x86, 0xcc, 0xc3, 0x56, 0x70, 0x1e, 0xd2, 0x59, 0xb1, 0x98, 0xb3, 0xee, 0xd6, 0xce, - 0xe6, 0xad, 0xb1, 0x9c, 0x09, 0xfd, 0x11, 0xc0, 0x01, 0xa1, 0xee, 0x81, 0x4c, 0x71, 0xe9, 0x9f, - 0x41, 0x92, 0xfb, 0x65, 0x0b, 0x3d, 0x6a, 0xe5, 0x76, 0xea, 0x23, 0x80, 0xea, 0x0d, 0xd4, 0xff, - 0x49, 0xe6, 0x06, 0xdc, 0x2f, 0x90, 0x1e, 0xa1, 0x64, 0xb6, 0xd2, 0x75, 0x48, 0xc9, 0xd3, 0x9b, - 0x85, 0xf8, 0x5d, 0x81, 0xfb, 0xc1, 0x89, 0xf8, 0x1c, 0xe2, 0x0d, 0xbb, 0xdf, 0xc7, 0x56, 0x53, - 0xce, 0xc4, 0x86, 0xff, 0xb4, 0x82, 0xc3, 0xb0, 0x2f, 0x80, 0xa6, 0x1b, 0xe1, 0x26, 0x88, 0x84, - 0x54, 0xa4, 0xde, 0x5e, 0xd1, 0x53, 0x88, 0x4b, 0xc6, 0xe0, 0xd9, 0xc7, 0x41, 0xad, 0x1a, 0x35, - 0x4d, 0x61, 0xe7, 0x5d, 0x30, 0x4a, 0x06, 0xdb, 0x2e, 0xfa, 0x3f, 0x0a, 0x24, 0x4e, 0x86, 0xf6, - 0xc0, 0x76, 0x70, 0x0f, 0xed, 0x04, 0xc7, 0x38, 0xe3, 0x97, 0xec, 0x82, 0xfe, 0xcf, 0xe8, 0x3e, - 0x87, 0xf9, 0x2e, 0x19, 0x5f, 0x06, 0x2b, 0xf0, 0x41, 0xdd, 0x89, 0x33, 0x13, 0x5d, 0xf9, 0xa4, - 0xd7, 0x42, 0xc7, 0x97, 0xed, 0x3f, 0xa3, 0x76, 0x59, 0xae, 0x14, 0x0c, 0x4d, 0x61, 0x37, 0xab, - 0xa8, 0x43, 0x18, 0xf8, 0x5d, 0xca, 0xdc, 0x67, 0xf9, 0xd2, 0x29, 0xbb, 0xb1, 0x34, 0x48, 0x49, - 0xbf, 0xb0, 0x44, 0x77, 0xdf, 0xc5, 0x01, 0x5e, 0x79, 0x79, 0xd1, 0x8f, 0xb0, 0x30, 0x71, 0x6f, - 0x22, 0xfd, 0xf6, 0x4b, 0x3f, 0xf3, 0xc1, 0x1d, 0x2e, 0x5e, 0x7d, 0xed, 0xed, 0x1f, 0x7f, 0xfd, - 0x1a, 0x59, 0x42, 0xe9, 0xdc, 0xd5, 0xf3, 0x1c, 0xeb, 0x42, 0xae, 0xcd, 0x51, 0x0d, 0x9e, 0xe6, - 0x02, 0x12, 0xee, 0xb2, 0x44, 0x4b, 0x53, 0x87, 0x69, 0xb0, 0xdf, 0x3e, 0x99, 0xb5, 0xc9, 0x34, - 0xfe, 0xd5, 0xa5, 0x2f, 0x73, 0xfe, 0x87, 0x68, 0xc1, 0xe3, 0x67, 0x77, 0xd2, 0xc8, 0x41, 0x07, - 0x90, 0xf4, 0x6d, 0x53, 0x14, 0x76, 0x13, 0xf8, 0xd6, 0x6c, 0x66, 0x46, 0x76, 0xfd, 0x1e, 0x2a, - 0x42, 0xca, 0xbf, 0x5a, 0xd1, 0xfb, 0x21, 0x4c, 0xfe, 0xa5, 0x7b, 0x03, 0x55, 0xdd, 
0xd3, 0x74, - 0x63, 0xc5, 0x61, 0x19, 0x02, 0x45, 0x67, 0x78, 0xd1, 0x69, 0x84, 0x58, 0xd1, 0x72, 0xd3, 0xb9, - 0x75, 0x97, 0x3c, 0xb9, 0xfc, 0xeb, 0x9a, 0x99, 0x64, 0xfd, 0xb6, 0xe5, 0xac, 0xdf, 0xdb, 0x51, - 0xd0, 0x77, 0xa0, 0x1e, 0x10, 0x8a, 0x02, 0x3f, 0x00, 0xae, 0xd7, 0x59, 0x66, 0x79, 0xca, 0x2e, - 0x63, 0x57, 0xb9, 0xc2, 0x45, 0xf4, 0x90, 0x29, 0x64, 0x57, 0x7e, 0xee, 0xa7, 0x2e, 0x19, 0x7f, - 0xb9, 0xb5, 0xf5, 0x33, 0xaa, 0x82, 0x5a, 0x9d, 0xa4, 0xbc, 0x5e, 0x63, 0x33, 0xbb, 0x27, 0x07, - 0x29, 0x33, 0xcd, 0xb8, 0xa7, 0x6c, 0xa1, 0x33, 0x98, 0x13, 0xeb, 0x0b, 0xad, 0xfa, 0x79, 0x03, - 0x2b, 0x6d, 0x26, 0xb5, 0x14, 0xbb, 0x15, 0x22, 0xf6, 0x1b, 0x88, 0x89, 0x36, 0xae, 0x84, 0xec, - 0x2d, 0xc1, 0xba, 0x3a, 0x73, 0xa3, 0xf1, 0x0e, 0x9e, 0x40, 0xa2, 0x6a, 0xe1, 0x81, 0xd3, 0xb6, - 0xe9, 0xcc, 0xb3, 0x98, 0x25, 0x2c, 0xcd, 0x85, 0x3d, 0x40, 0x29, 0x26, 0xcc, 0x91, 0x2c, 0x2f, - 0xb7, 0xbf, 0x7f, 0xda, 0xea, 0xd0, 0xf6, 0xa8, 0x9e, 0x6d, 0xd8, 0xfd, 0x5c, 0xdf, 0x76, 0x46, - 0x5d, 0x9c, 0xab, 0xf7, 0xb0, 0x43, 0x73, 0x21, 0xff, 0x8a, 0xd4, 0xe7, 0xb8, 0xf1, 0xe3, 0x7f, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0xf6, 0xde, 0xbe, 0xa8, 0x0c, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ManagementClient is the client API for Management service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ManagementClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) - ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) - ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) -} - -type managementClient struct { - cc *grpc.ClientConn -} - -func NewManagementClient(cc *grpc.ClientConn) ManagementClient { - return &managementClient{cc} -} - -func (c *managementClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, "/management.Management/NodeHealthCheck", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { - out := new(NodeInfoResponse) - err := c.cc.Invoke(ctx, "/management.Management/NodeInfo", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/ClusterJoin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/ClusterLeave", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { - out := new(ClusterInfoResponse) - err := c.cc.Invoke(ctx, "/management.Management/ClusterInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[0], "/management.Management/ClusterWatch", opts...) 
- if err != nil { - return nil, err - } - x := &managementClusterWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Management_ClusterWatchClient interface { - Recv() (*ClusterWatchResponse, error) - grpc.ClientStream -} - -type managementClusterWatchClient struct { - grpc.ClientStream -} - -func (x *managementClusterWatchClient) Recv() (*ClusterWatchResponse, error) { - m := new(ClusterWatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *managementClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/management.Management/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Set", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[1], "/management.Management/Watch", opts...) 
- if err != nil { - return nil, err - } - x := &managementWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Management_WatchClient interface { - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type managementWatchClient struct { - grpc.ClientStream -} - -func (x *managementWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ManagementServer is the server API for Management service. -type ManagementServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) - ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) - ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) - ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) - ClusterWatch(*empty.Empty, Management_ClusterWatchServer) error - Get(context.Context, *GetRequest) (*GetResponse, error) - Set(context.Context, *SetRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - Watch(*WatchRequest, Management_WatchServer) error - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) -} - -// UnimplementedManagementServer can be embedded to have forward compatible implementations. 
-type UnimplementedManagementServer struct { -} - -func (*UnimplementedManagementServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedManagementServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") -} -func (*UnimplementedManagementServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") -} -func (*UnimplementedManagementServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") -} -func (*UnimplementedManagementServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") -} -func (*UnimplementedManagementServer) ClusterWatch(req *empty.Empty, srv Management_ClusterWatchServer) error { - return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") -} -func (*UnimplementedManagementServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedManagementServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") -} -func (*UnimplementedManagementServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedManagementServer) Watch(req *WatchRequest, srv Management_WatchServer) error { - 
return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} -func (*UnimplementedManagementServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} - -func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { - s.RegisterService(&_Management_serviceDesc, srv) -} - -func _Management_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).NodeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/NodeInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).NodeInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterJoinRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(ManagementServer).ClusterJoin(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterJoin", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterLeaveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).ClusterLeave(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterLeave", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).ClusterInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ManagementServer).ClusterWatch(m, &managementClusterWatchServer{stream}) -} - -type Management_ClusterWatchServer interface { - 
Send(*ClusterWatchResponse) error - grpc.ServerStream -} - -type managementClusterWatchServer struct { - grpc.ServerStream -} - -func (x *managementClusterWatchServer) Send(m *ClusterWatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Management_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Set(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Set", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Set(ctx, req.(*SetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ManagementServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WatchRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ManagementServer).Watch(m, &managementWatchServer{stream}) -} - -type Management_WatchServer interface { - Send(*WatchResponse) error - grpc.ServerStream -} - -type managementWatchServer struct { - grpc.ServerStream -} - -func (x *managementWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Management_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Management_serviceDesc = grpc.ServiceDesc{ - ServiceName: "management.Management", - HandlerType: (*ManagementServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Management_NodeHealthCheck_Handler, - }, - { - MethodName: "NodeInfo", - Handler: _Management_NodeInfo_Handler, - }, - { - MethodName: "ClusterJoin", - Handler: _Management_ClusterJoin_Handler, - }, - { - MethodName: "ClusterLeave", - Handler: _Management_ClusterLeave_Handler, - }, - { - MethodName: "ClusterInfo", - Handler: _Management_ClusterInfo_Handler, - }, - { - MethodName: "Get", - Handler: _Management_Get_Handler, - }, - { - MethodName: "Set", - Handler: _Management_Set_Handler, - }, - { - MethodName: "Delete", - Handler: 
_Management_Delete_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Management_Snapshot_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ClusterWatch", - Handler: _Management_ClusterWatch_Handler, - ServerStreams: true, - }, - { - StreamName: "Watch", - Handler: _Management_Watch_Handler, - ServerStreams: true, - }, - }, - Metadata: "protobuf/management/management.proto", -} diff --git a/protobuf/management/management.pb.gw.go b/protobuf/management/management.pb.gw.go deleted file mode 100644 index 5430218..0000000 --- a/protobuf/management/management.pb.gw.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: protobuf/management/management.proto - -/* -Package management is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package management - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/empty" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Management_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Management_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Management_NodeHealthCheck_0); err != nil { 
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Get_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Set_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SetRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterManagementHandlerFromEndpoint is same as RegisterManagementHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterManagementHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterManagementHandler(ctx, mux, conn) -} - -// RegisterManagementHandler registers the http handlers for service Management to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterManagementHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterManagementHandlerClient(ctx, mux, NewManagementClient(conn)) -} - -// RegisterManagementHandlerClient registers the http handlers for service Management -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ManagementClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ManagementClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ManagementClient" to call the correct interceptors. 
-func RegisterManagementHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ManagementClient) error { - - mux.Handle("GET", pattern_Management_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Management_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Management_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Management_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Management_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Set_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Management_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Management_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Management_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 
2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Management_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Management_NodeInfo_0 = runtime.ForwardResponseMessage - - forward_Management_ClusterInfo_0 = runtime.ForwardResponseMessage - - forward_Management_Get_0 = runtime.ForwardResponseMessage - - forward_Management_Set_0 = runtime.ForwardResponseMessage - - forward_Management_Delete_0 = runtime.ForwardResponseMessage - - forward_Management_Snapshot_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto deleted file mode 100644 index e3c8429..0000000 --- a/protobuf/management/management.proto +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; -import "protoc-gen-swagger/options/annotations.proto"; - -package management; - -option go_package = "github.com/mosuka/blast/protobuf/management"; - -service Management { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { - option (google.api.http) = { - get: "/v1/node/status" - }; - } - - rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} - rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { - option (google.api.http) = { - get: "/v1/cluster/status" - }; - } - rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/data/{key=**}" - }; - } - rpc Set (SetRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/data/{key=**}" - body: "*" - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/data/{key=**}" - }; - } - rpc Watch (WatchRequest) returns (stream WatchResponse) {} - rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { - option (google.api.http) = { - get: "/v1/snapshot" - }; - } -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message Metadata { - string grpc_address = 1; - string grpc_gateway_address = 2; - string http_address = 3; -} 
- -message Node { - enum State { - UNKNOWN = 0; - FOLLOWER = 1; - CANDIDATE = 2; - LEADER = 3; - SHUTDOWN = 4; - } - string id = 1; - string bind_address = 2; - State state = 3; - Metadata metadata = 4; -} - -message Cluster { - map nodes = 1; -} - -message NodeInfoResponse { - Node node = 1; -} - -message ClusterJoinRequest { - Node node = 1; -} - -message ClusterLeaveRequest { - string id = 1; -} - -message ClusterInfoResponse { - Cluster cluster = 1; -} - -message ClusterWatchResponse { - enum Event { - UNKNOWN = 0; - JOIN = 1; - LEAVE = 2; - UPDATE = 3; - } - Event event = 1; - Node node = 2; - Cluster cluster = 3; -} - -message KeyValue { - string key = 1; - google.protobuf.Any value = 2; -} - -message GetRequest { - string key = 1; -} - -message GetResponse { -// option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { -// json_schema: { -// required: ["value"] -// }, -// example: { -// value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." 
} }' -// } -// }; -// google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; - google.protobuf.Any value = 1; -} - -message SetRequest { - string key = 1; - google.protobuf.Any value = 2; -} - -message DeleteRequest { - string key = 1; -} - -message WatchRequest { - string key = 1; -} - -message WatchResponse { - enum Command { - UNKNOWN = 0; - SET = 1; - DELETE = 2; - } - Command command = 1; - string key = 2; - google.protobuf.Any value = 3; -} - -message Proposal { - enum Event { - UNKNOWN = 0; - SET_NODE = 1; - DELETE_NODE = 2; - SET_VALUE = 3; - DELETE_VALUE = 4; - } - Event event = 1; - Node node = 2; - KeyValue key_value = 3; -} diff --git a/protobuf/management/management.swagger.json b/protobuf/management/management.swagger.json deleted file mode 100644 index 18f1ed1..0000000 --- a/protobuf/management/management.swagger.json +++ /dev/null @@ -1,409 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/management/management.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/cluster/status": { - "get": { - "operationId": "ClusterInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementClusterInfoResponse" - } - } - }, - "tags": [ - "Management" - ] - } - }, - "/v1/data/{key}": { - "get": { - "operationId": "Get", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementGetResponse" - } - } - }, - "parameters": [ - { - "name": "key", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Management" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "key", - "in": 
"path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Management" - ] - }, - "put": { - "operationId": "Set", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "key", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/managementSetRequest" - } - } - ], - "tags": [ - "Management" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Management" - ] - } - }, - "/v1/node/status": { - "get": { - "operationId": "NodeInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementNodeInfoResponse" - } - } - }, - "tags": [ - "Management" - ] - } - }, - "/v1/snapshot": { - "get": { - "operationId": "Snapshot", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "tags": [ - "Management" - ] - } - } - }, - "definitions": { - "NodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "WatchResponseCommand": { - "type": "string", - "enum": [ - "UNKNOWN", - "SET", - "DELETE" - ], - "default": "UNKNOWN" - }, - "managementCluster": { - "type": "object", - "properties": { - "nodes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/managementNode" - } - } - } - }, - "managementClusterInfoResponse": { - 
"type": "object", - "properties": { - "cluster": { - "$ref": "#/definitions/managementCluster" - } - } - }, - "managementClusterWatchResponse": { - "type": "object", - "properties": { - "event": { - "$ref": "#/definitions/managementClusterWatchResponseEvent" - }, - "node": { - "$ref": "#/definitions/managementNode" - }, - "cluster": { - "$ref": "#/definitions/managementCluster" - } - } - }, - "managementClusterWatchResponseEvent": { - "type": "string", - "enum": [ - "UNKNOWN", - "JOIN", - "LEAVE", - "UPDATE" - ], - "default": "UNKNOWN" - }, - "managementGetResponse": { - "type": "object", - "properties": { - "value": { - "$ref": "#/definitions/protobufAny", - "title": "option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = {\n json_schema: {\n required: [\"value\"]\n },\n example: {\n value: '{ \"fields\": { \"field1\": \"Get Example\", \"field2\": \"This is an example Get response.\" } }'\n }\n };\n google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}];" - } - } - }, - "managementMetadata": { - "type": "object", - "properties": { - "grpc_address": { - "type": "string" - }, - "grpc_gateway_address": { - "type": "string" - }, - "http_address": { - "type": "string" - } - } - }, - "managementNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "bind_address": { - "type": "string" - }, - "state": { - "$ref": "#/definitions/managementNodeState" - }, - "metadata": { - "$ref": "#/definitions/managementMetadata" - } - } - }, - "managementNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/managementNodeHealthCheckResponseState" - } - } - }, - "managementNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "managementNodeInfoResponse": { - "type": "object", - "properties": { - "node": { - "$ref": 
"#/definitions/managementNode" - } - } - }, - "managementNodeState": { - "type": "string", - "enum": [ - "UNKNOWN", - "FOLLOWER", - "CANDIDATE", - "LEADER", - "SHUTDOWN" - ], - "default": "UNKNOWN" - }, - "managementSetRequest": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "managementWatchResponse": { - "type": "object", - "properties": { - "command": { - "$ref": "#/definitions/WatchResponseCommand" - }, - "key": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. 
(Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." - } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - }, - "x-stream-definitions": { - "managementClusterWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/managementClusterWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of managementClusterWatchResponse" - }, - "managementWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/managementWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of managementWatchResponse" - } - } -} diff --git a/protobuf/util.go b/protobuf/util.go deleted file mode 100644 index d3a6ca5..0000000 --- a/protobuf/util.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package protobuf - -import ( - "encoding/json" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/registry" -) - -func MarshalAny(message *any.Any) (interface{}, error) { - if message == nil { - return nil, nil - } - - typeUrl := message.TypeUrl - value := message.Value - - instance := registry.TypeInstanceByName(typeUrl) - - err := json.Unmarshal(value, instance) - if err != nil { - return nil, err - } - - return instance, nil -} - -func UnmarshalAny(instance interface{}, message *any.Any) error { - var err error - - if instance == nil { - return nil - } - - message.TypeUrl = registry.TypeNameByInstance(instance) - - message.Value, err = json.Marshal(instance) - if err != nil { - return err - } - - return nil -} diff --git a/protobuf/util_test.go b/protobuf/util_test.go deleted file mode 100644 index 9523b51..0000000 --- a/protobuf/util_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package protobuf - -import ( - "bytes" - "testing" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search/query" - "github.com/golang/protobuf/ptypes/any" -) - -func TestMarshalAny_Slice(t *testing.T) { - data := []interface{}{"a", 1} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "[]interface {}" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`["a",1]`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestMarshalAny_Map(t *testing.T) { - data := map[string]interface{}{"a": 1, "b": 2, "c": 3} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedMapType := "map[string]interface {}" - actualMapType := dataAny.TypeUrl - if expectedMapType != actualMapType { - t.Fatalf("expected content to see %s, saw %s", expectedMapType, actualMapType) - } - - expectedValue := []byte(`{"a":1,"b":2,"c":3}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -//func TestMarshalAny_Document(t *testing.T) { -// fieldsMap := map[string]interface{}{"f1": "aaa", "f2": 222, "f3": "ccc"} -// fieldsAny := &any.Any{} -// err := UnmarshalAny(fieldsMap, fieldsAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// data := &index.Document{ -// Id: "1", -// Fields: fieldsAny, -// } -// -// dataAny := &any.Any{} -// err = UnmarshalAny(data, dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// expectedType := "index.Document" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Fatalf("expected content to see %s, saw %s", expectedType, 
actualType) -// } -// -// expectedValue := []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -//func TestMarshalAny_Node(t *testing.T) { -// data := &raft.Node{ -// Id: "node1", -// Metadata: &raft.Metadata{ -// GrpcAddr: ":5050", -// DataDir: "/tmp/blast/index1", -// BindAddr: ":6060", -// HttpAddr: ":8080", -// Leader: true, -// }, -// } -// -// dataAny := &any.Any{} -// err := UnmarshalAny(data, dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// expectedType := "raft.Node" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) -// } -// -// expectedValue := []byte(`{"id":"node1","metadata":{"bind_addr":":6060","grpc_addr":":5050","http_addr":":8080","data_dir":"/tmp/blast/index1","leader":true}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -func TestMarshalAny_SearchRequest(t *testing.T) { - data := bleve.NewSearchRequest(bleve.NewQueryStringQuery("blast")) - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "bleve.SearchRequest" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false,"search_after":null,"search_before":null}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, 
saw %v", expectedValue, actualValue) - } -} - -func TestMarshalAny_SearchResult(t *testing.T) { - data := &bleve.SearchResult{ - Total: 10, - } - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "bleve.SearchResult" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestUnmarshalAny_Slice(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "[]interface {}", - Value: []byte(`["a",1]`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*[]interface{}) - - expected1 := "a" - actual1 := data[0] - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(1) - actual2 := data[1] - if expected2 != actual2 { - t.Fatalf("expected content to see %v, saw %v", expected2, actual2) - } -} - -func TestUnmarshalAny_Map(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "map[string]interface {}", - Value: []byte(`{"a":1,"b":2,"c":3}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*map[string]interface{}) - - expected1 := float64(1) - actual1 := data["a"] - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(2) - actual2 := data["b"] - if expected2 != actual2 { - t.Fatalf("expected content to see %v, saw %v", expected2, actual2) - } - - expected3 := float64(3) - actual3 := data["c"] - if expected3 != actual3 { - t.Fatalf("expected content to see %v, saw %v", expected3, actual3) - } -} 
- -//func TestUnmarshalAny_Document(t *testing.T) { -// dataAny := &any.Any{ -// TypeUrl: "index.Document", -// Value: []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`), -// } -// -// ins, err := MarshalAny(dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// data := *ins.(*index.Document) -// -// expected1 := "1" -// actual1 := data.Id -// if expected1 != actual1 { -// t.Fatalf("expected content to see %v, saw %v", expected1, actual1) -// } -// -// expected2 := "map[string]interface {}" -// actual2 := data.Fields.TypeUrl -// if expected2 != actual2 { -// t.Fatalf("expected content to see %v, saw %v", expected2, actual2) -// } -// -// expected3 := []byte(`{"f1":"aaa","f2":222,"f3":"ccc"}`) -// actual3 := data.Fields.Value -// if !bytes.Equal(expected3, actual3) { -// t.Fatalf("expected content to see %v, saw %v", expected3, actual3) -// } -//} - -func TestUnmarshalAny_SearchRequest(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchRequest", - Value: []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*bleve.SearchRequest) - - expected1 := bleve.NewQueryStringQuery("blast").Query - actual1 := data.Query.(*query.QueryStringQuery).Query - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } -} - -func TestUnmarshalAny_SearchResult(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchResult", - Value: []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*bleve.SearchResult) - - expected1 := uint64(10) - actual1 := data.Total - if expected1 != actual1 { 
- t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } -} diff --git a/registry/type.go b/registry/type.go index 5cb1206..7dc13b0 100644 --- a/registry/type.go +++ b/registry/type.go @@ -1,57 +1,11 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package registry import ( "errors" "fmt" "reflect" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" ) -func init() { - RegisterType("bool", reflect.TypeOf(false)) - RegisterType("string", reflect.TypeOf("")) - RegisterType("int", reflect.TypeOf(int(0))) - RegisterType("int8", reflect.TypeOf(int8(0))) - RegisterType("int16", reflect.TypeOf(int16(0))) - RegisterType("int32", reflect.TypeOf(int32(0))) - RegisterType("int64", reflect.TypeOf(int64(0))) - RegisterType("uint", reflect.TypeOf(uint(0))) - RegisterType("uint8", reflect.TypeOf(uint8(0))) - RegisterType("uint16", reflect.TypeOf(uint16(0))) - RegisterType("uint32", reflect.TypeOf(uint32(0))) - RegisterType("uint64", reflect.TypeOf(uint64(0))) - RegisterType("uintptr", reflect.TypeOf(uintptr(0))) - RegisterType("byte", reflect.TypeOf(byte(0))) - RegisterType("rune", reflect.TypeOf(rune(0))) - RegisterType("float32", reflect.TypeOf(float32(0))) - RegisterType("float64", reflect.TypeOf(float64(0))) - RegisterType("complex64", reflect.TypeOf(complex64(0))) - RegisterType("complex128", reflect.TypeOf(complex128(0))) - - RegisterType("map[string]interface {}", 
reflect.TypeOf((map[string]interface{})(nil))) - RegisterType("[]interface {}", reflect.TypeOf(([]interface{})(nil))) - - RegisterType("mapping.IndexMappingImpl", reflect.TypeOf(mapping.IndexMappingImpl{})) - RegisterType("bleve.SearchRequest", reflect.TypeOf(bleve.SearchRequest{})) - RegisterType("bleve.SearchResult", reflect.TypeOf(bleve.SearchResult{})) -} - type TypeRegistry map[string]reflect.Type var Types = make(TypeRegistry, 0) @@ -68,13 +22,11 @@ func TypeByName(name string) reflect.Type { } func TypeNameByInstance(instance interface{}) string { - switch instance.(type) { - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return reflect.TypeOf(instance).Name() - case map[string]interface{}, []interface{}: - return reflect.TypeOf(instance).String() + switch ins := instance.(type) { + case map[string]interface{}: + return reflect.TypeOf(ins).String() default: - return reflect.TypeOf(instance).Elem().String() + return reflect.TypeOf(ins).Elem().String() } } diff --git a/server/grpc_gateway.go b/server/grpc_gateway.go new file mode 100644 index 0000000..c319fc0 --- /dev/null +++ b/server/grpc_gateway.go @@ -0,0 +1,129 @@ +package server + +import ( + "context" + "math" + "net" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +func responseFilter(ctx context.Context, w http.ResponseWriter, resp proto.Message) error { + switch resp.(type) { + case *protobuf.GetResponse: + w.Header().Set("Content-Type", "application/json") + case *protobuf.MetricsResponse: + w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8") + default: + w.Header().Set("Content-Type", marshaler.DefaultContentType) + } + + 
return nil +} + +type GRPCGateway struct { + httpAddress string + grpcAddress string + + cancel context.CancelFunc + listener net.Listener + mux *runtime.ServeMux + + certificateFile string + keyFile string + + logger *zap.Logger +} + +func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCGateway, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + baseCtx := context.TODO() + ctx, cancel := context.WithCancel(baseCtx) + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, new(marshaler.BlastMarshaler)), + runtime.WithForwardResponseOption(responseFilter), + ) + + if certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + err := protobuf.RegisterIndexHandlerFromEndpoint(ctx, mux, grpcAddress, dialOpts) + if err != nil { + logger.Error("failed to register KVS handler from endpoint", zap.Error(err)) + return nil, err + } + + listener, err := net.Listen("tcp", httpAddress) + if err != nil { + logger.Error("failed to create index service", zap.Error(err)) + return nil, err + } + + return &GRPCGateway{ + httpAddress: httpAddress, + grpcAddress: grpcAddress, + listener: listener, + mux: mux, + cancel: cancel, + certificateFile: certificateFile, + keyFile: keyFile, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + if s.certificateFile == "" && s.keyFile == "" { + go func() { + _ = http.Serve(s.listener, s.mux) + }() + } else { + go func() { + _ = 
http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile) + }() + } + + s.logger.Info("gRPC gateway started", zap.String("http_address", s.httpAddress)) + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + s.logger.Error("failed to close listener", zap.String("http_address", s.listener.Addr().String()), zap.Error(err)) + } + + s.logger.Info("gRPC gateway stopped", zap.String("http_address", s.httpAddress)) + return nil +} diff --git a/server/grpc_server.go b/server/grpc_server.go new file mode 100644 index 0000000..d01f5a3 --- /dev/null +++ b/server/grpc_server.go @@ -0,0 +1,129 @@ +package server + +import ( + "math" + "net" + "time" + + grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpczap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +type GRPCServer struct { + grpcAddress string + service *GRPCService + server *grpc.Server + listener net.Listener + + certificateFile string + keyFile string + commonName string + + logger *zap.Logger +} + +func NewGRPCServer(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) { + grpcLogger := logger.Named("grpc") + + opts := []grpc.ServerOption{ + grpc.MaxRecvMsgSize(math.MaxInt64), + grpc.MaxSendMsgSize(math.MaxInt64), + grpc.StreamInterceptor( + grpcmiddleware.ChainStreamServer( + metric.GrpcMetrics.StreamServerInterceptor(), + grpczap.StreamServerInterceptor(grpcLogger), + ), + ), + grpc.UnaryInterceptor( + grpcmiddleware.ChainUnaryServer( + metric.GrpcMetrics.UnaryServerInterceptor(), + grpczap.UnaryServerInterceptor(grpcLogger), + ), + ), + grpc.KeepaliveParams( + 
keepalive.ServerParameters{ + //MaxConnectionIdle: 0, + //MaxConnectionAge: 0, + //MaxConnectionAgeGrace: 0, + Time: 5 * time.Second, + Timeout: 5 * time.Second, + }, + ), + } + + if certificateFile == "" && keyFile == "" { + logger.Info("disabling TLS") + } else { + logger.Info("enabling TLS") + creds, err := credentials.NewServerTLSFromFile(certificateFile, keyFile) + if err != nil { + logger.Error("failed to create credentials", zap.Error(err)) + } + opts = append(opts, grpc.Creds(creds)) + } + + server := grpc.NewServer( + opts..., + ) + + service, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + logger.Error("failed to create key value store service", zap.Error(err)) + return nil, err + } + + protobuf.RegisterIndexServer(server, service) + + // Initialize all metrics. + metric.GrpcMetrics.InitializeMetrics(server) + grpc_prometheus.Register(server) + + listener, err := net.Listen("tcp", grpcAddress) + if err != nil { + logger.Error("failed to create listener", zap.String("grpc_address", grpcAddress), zap.Error(err)) + return nil, err + } + + return &GRPCServer{ + grpcAddress: grpcAddress, + service: service, + server: server, + listener: listener, + certificateFile: certificateFile, + keyFile: keyFile, + commonName: commonName, + logger: logger, + }, nil +} + +func (s *GRPCServer) Start() error { + if err := s.service.Start(); err != nil { + s.logger.Error("failed to start service", zap.Error(err)) + } + + go func() { + _ = s.server.Serve(s.listener) + }() + + s.logger.Info("gRPC server started", zap.String("grpc_address", s.grpcAddress)) + return nil +} + +func (s *GRPCServer) Stop() error { + if err := s.service.Stop(); err != nil { + s.logger.Error("failed to stop service", zap.Error(err)) + } + + //s.server.GracefulStop() + s.server.Stop() + + s.logger.Info("gRPC server stopped", zap.String("grpc_address", s.grpcAddress)) + return nil +} diff --git a/server/grpc_service.go b/server/grpc_service.go new file mode 
100644 index 0000000..2d0843a --- /dev/null +++ b/server/grpc_service.go @@ -0,0 +1,540 @@ +package server + +import ( + "bytes" + "context" + "encoding/json" + "sync" + "time" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/empty" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "github.com/prometheus/common/expfmt" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type GRPCService struct { + raftServer *RaftServer + certificateFile string + commonName string + logger *zap.Logger + + watchMutex sync.RWMutex + watchChans map[chan protobuf.WatchResponse]struct{} + + peerClients map[string]*client.GRPCClient + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} +} + +func NewGRPCService(raftServer *RaftServer, certificateFile string, commonName string, logger *zap.Logger) (*GRPCService, error) { + return &GRPCService{ + raftServer: raftServer, + certificateFile: certificateFile, + commonName: commonName, + logger: logger, + + watchChans: make(map[chan protobuf.WatchResponse]struct{}), + + peerClients: make(map[string]*client.GRPCClient, 0), + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + }, nil +} + +func (s *GRPCService) Start() error { + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("gRPC service started") + return nil +} + +func (s *GRPCService) Stop() error { + s.stopWatchCluster() + + s.logger.Info("gRPC service stopped") + return nil +} + +func (s *GRPCService) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + if 
err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case event := <-s.raftServer.applyCh: + watchResp := &protobuf.WatchResponse{ + Event: event, + } + for c := range s.watchChans { + c <- *watchResp + } + case <-ticker.C: + s.watchMutex.Lock() + + // open clients for peer nodes + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Warn("failed to get cluster info", zap.String("err", err.Error())) + } + for id, node := range nodes { + if id == s.raftServer.id { + continue + } + + if node.Metadata == nil || node.Metadata.GrpcAddress == "" { + s.logger.Debug("gRPC address missing", zap.String("id", id)) + continue + } + if c, ok := s.peerClients[id]; ok { + if c.Target() != node.Metadata.GrpcAddress { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress)) + if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil { + s.peerClients[id] = newClient + } else { + s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } + } else { + s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress)) + if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil { + 
 s.peerClients[id] = newClient + } else { + s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress), zap.Error(err)) + } + } + } + + // close clients for non-existent peer nodes + for id, c := range s.peerClients { + if _, exist := nodes[id]; !exist { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close old client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } + } + + s.watchMutex.Unlock() + } + } +} + +func (s *GRPCService) stopWatchCluster() { + if s.watchClusterStopCh != nil { + s.logger.Info("send a request to stop updating a cluster") + close(s.watchClusterStopCh) + } + + s.logger.Info("wait for the cluster watching to stop") + <-s.watchClusterDoneCh + s.logger.Info("the cluster watching has been stopped") + + s.logger.Info("close all peer clients") + for id, c := range s.peerClients { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } +} + +func (s *GRPCService) LivenessCheck(ctx context.Context, req *empty.Empty) (*protobuf.LivenessCheckResponse, error) { + resp := &protobuf.LivenessCheckResponse{} + + resp.Alive = true + + return resp, nil +} + +func (s *GRPCService) ReadinessCheck(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessCheckResponse, error) { + resp := &protobuf.ReadinessCheckResponse{} + + timeout := 10 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + s.logger.Error("missing leader node", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + if s.raftServer.State() == raft.Candidate || s.raftServer.State() == raft.Shutdown 
{ + err := errors.ErrNodeNotReady + s.logger.Error(err.Error(), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Ready = true + + return resp, nil +} + +func (s *GRPCService) Join(ctx context.Context, req *protobuf.JoinRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Join(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Join(req.Id, req.Node) + if err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + s.logger.Debug("node already exists", zap.Any("req", req), zap.Error(err)) + default: + s.logger.Error("failed to join node to the cluster", zap.String("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + } + + return resp, nil +} + +func (s *GRPCService) Leave(ctx context.Context, req *protobuf.LeaveRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Leave(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Leave(req.Id) + if err != nil { + s.logger.Error("failed to leave node from the cluster", 
zap.Any("req", req), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Node(ctx context.Context, req *empty.Empty) (*protobuf.NodeResponse, error) { + resp := &protobuf.NodeResponse{} + + node, err := s.raftServer.Node() + if err != nil { + s.logger.Error("failed to get node info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Node = node + + return resp, nil +} + +func (s *GRPCService) Cluster(ctx context.Context, req *empty.Empty) (*protobuf.ClusterResponse, error) { + resp := &protobuf.ClusterResponse{} + + cluster := &protobuf.Cluster{} + + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + for id, node := range nodes { + if id == s.raftServer.id { + node.State = s.raftServer.StateStr() + } else { + c := s.peerClients[id] + nodeResp, err := c.Node() + if err != nil { + node.State = raft.Shutdown.String() + s.logger.Error("failed to get node info", zap.String("grpc_address", node.Metadata.GrpcAddress), zap.String("err", err.Error())) + } else { + node.State = nodeResp.Node.State + } + } + } + cluster.Nodes = nodes + + serverID, err := s.raftServer.LeaderID(60 * time.Second) + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + cluster.Leader = string(serverID) + + resp.Cluster = cluster + + return resp, nil +} + +func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + resp := &empty.Empty{} + + err := s.raftServer.Snapshot() + if err != nil { + s.logger.Error("failed to snapshot data", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Get(ctx 
context.Context, req *protobuf.GetRequest) (*protobuf.GetResponse, error) { + resp := &protobuf.GetResponse{} + + fields, err := s.raftServer.Get(req.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + s.logger.Debug("document not found", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.NotFound, err.Error()) + default: + s.logger.Error("failed to get document", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + s.logger.Error("failed to marshal fields map to bytes", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Fields = fieldsBytes + + return resp, nil +} + +func (s *GRPCService) Set(ctx context.Context, req *protobuf.SetRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Set(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Set(req); err != nil { + s.logger.Error("failed to index document", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Delete(ctx context.Context, req *protobuf.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", 
zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Delete(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Delete(req); err != nil { + s.logger.Error("failed to delete document", zap.String("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkIndex(ctx context.Context, req *protobuf.BulkIndexRequest) (*protobuf.BulkIndexResponse, error) { + resp := &protobuf.BulkIndexResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkIndex(req) + } + + if err := s.raftServer.BulkIndex(req); err != nil { + s.logger.Error("failed to index documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkDelete(ctx context.Context, req *protobuf.BulkDeleteRequest) (*protobuf.BulkDeleteResponse, error) { + resp := &protobuf.BulkDeleteResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkDelete(req) + } + + if err := s.raftServer.BulkDelete(req); err != nil { + s.logger.Error("failed to delete documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, 
nil +} + +func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { + resp := &protobuf.SearchResponse{} + + searchRequest := &bleve.SearchRequest{} + if err := json.Unmarshal(req.SearchRequest, searchRequest); err != nil { + s.logger.Error("failed to unmarshal bytes to search request", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResult, err := s.raftServer.Search(searchRequest) + if err != nil { + s.logger.Error("failed to search documents", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResultBytes, err := json.Marshal(searchResult) + if err != nil { + s.logger.Error("failed to marshal search result to bytes", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.SearchResult = searchResultBytes + + return resp, nil +} + +func (s *GRPCService) Mapping(ctx context.Context, req *empty.Empty) (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + var err error + + resp, err = s.raftServer.Mapping() + if err != nil { + s.logger.Error("failed to get document", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Watch(req *empty.Empty, server protobuf.Index_WatchServer) error { + chans := make(chan protobuf.WatchResponse) + + s.watchMutex.Lock() + s.watchChans[chans] = struct{}{} + s.watchMutex.Unlock() + + defer func() { + s.watchMutex.Lock() + delete(s.watchChans, chans) + s.watchMutex.Unlock() + close(chans) + }() + + for resp := range chans { + if err := server.Send(&resp); err != nil { + s.logger.Error("failed to send watch data", zap.String("event", resp.Event.String()), zap.Error(err)) + return status.Error(codes.Internal, err.Error()) + } + } + + return nil +} + +func (s *GRPCService) Metrics(ctx context.Context, req *empty.Empty) (*protobuf.MetricsResponse, error) { + resp := 
&protobuf.MetricsResponse{} + + var err error + + gather, err := metric.Registry.Gather() + if err != nil { + s.logger.Error("failed to get gather", zap.Error(err)) + } + out := &bytes.Buffer{} + for _, mf := range gather { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + s.logger.Error("failed to parse metric family", zap.Error(err)) + } + } + + resp.Metrics = out.Bytes() + + return resp, nil +} diff --git a/server/raft_fsm.go b/server/raft_fsm.go new file mode 100644 index 0000000..2f69a94 --- /dev/null +++ b/server/raft_fsm.go @@ -0,0 +1,400 @@ +package server + +import ( + "encoding/json" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/storage" + "go.uber.org/zap" +) + +type ApplyResponse struct { + count int + error error +} + +type RaftFSM struct { + logger *zap.Logger + + index *storage.Index + metadata map[string]*protobuf.Metadata + nodesMutex sync.RWMutex + + applyCh chan *protobuf.Event +} + +func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*RaftFSM, error) { + index, err := storage.NewIndex(path, indexMapping, logger) + if err != nil { + logger.Error("failed to create index store", zap.String("path", path), zap.Error(err)) + return nil, err + } + + return &RaftFSM{ + logger: logger, + index: index, + metadata: make(map[string]*protobuf.Metadata, 0), + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (f *RaftFSM) Close() error { + f.applyCh <- nil + f.logger.Info("apply channel has closed") + + if err := f.index.Close(); err != nil { + f.logger.Error("failed to close index store", zap.Error(err)) + return err + } + + f.logger.Info("Index has closed") + + return nil +} + +func (f *RaftFSM) get(id string) 
(map[string]interface{}, error) { + return f.index.Get(id) +} + +func (f *RaftFSM) search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + return f.index.Search(searchRequest) +} + +func (f *RaftFSM) set(id string, fields map[string]interface{}) error { + return f.index.Index(id, fields) +} + +func (f *RaftFSM) delete(id string) error { + return f.index.Delete(id) +} + +func (f *RaftFSM) bulkIndex(docs []map[string]interface{}) (int, error) { + return f.index.BulkIndex(docs) +} + +func (f *RaftFSM) bulkDelete(ids []string) (int, error) { + return f.index.BulkDelete(ids) +} + +func (f *RaftFSM) getMetadata(id string) *protobuf.Metadata { + if metadata, exists := f.metadata[id]; exists { + return metadata + } else { + f.logger.Debug("metadata not found", zap.String("id", id)) + return nil + } +} + +func (f *RaftFSM) setMetadata(id string, metadata *protobuf.Metadata) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + f.metadata[id] = metadata + + return nil +} + +func (f *RaftFSM) deleteMetadata(id string) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + if _, exists := f.metadata[id]; exists { + delete(f.metadata, id) + } + + return nil +} + +func (f *RaftFSM) Apply(l *raft.Log) interface{} { + var event protobuf.Event + err := proto.Unmarshal(l.Data, &event) + if err != nil { + f.logger.Error("failed to unmarshal message bytes to KVS command", zap.Error(err)) + return err + } + + switch event.Type { + case protobuf.Event_Join: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := data.(*protobuf.SetMetadataRequest) + + if err := f.setMetadata(req.Id, 
req.Metadata); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Leave: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteMetadataRequest) + + if err := f.deleteMetadata(req.Id); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Set: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.SetRequest) + + var fields map[string]interface{} + if err := json.Unmarshal(req.Fields, &fields); err != nil { + return &ApplyResponse{error: err} + } + + if err := f.set(req.Id, fields); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Delete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to delete request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteRequest) + + if err := 
f.delete(req.Id); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_BulkIndex: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + req := *data.(*protobuf.BulkIndexRequest) + + docs := make([]map[string]interface{}, 0) + for _, r := range req.Requests { + var fields map[string]interface{} + if err := json.Unmarshal(r.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal bytes to map", zap.String("id", r.Id), zap.Error(err)) + continue + } + + doc := map[string]interface{}{ + "id": r.Id, + "fields": fields, + } + docs = append(docs, doc) + } + + count, err := f.bulkIndex(docs) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + case protobuf.Event_BulkDelete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + req := *data.(*protobuf.BulkDeleteRequest) + + ids := make([]string, 0) + for _, r := range req.Requests { + ids = append(ids, r.Id) + } + + count, err := f.bulkDelete(ids) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + default: + err = 
errors.ErrUnsupportedEvent + f.logger.Error("unsupported command", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } +} + +func (f *RaftFSM) Stats() map[string]interface{} { + return f.index.Stats() +} + +func (f *RaftFSM) Mapping() *mapping.IndexMappingImpl { + return f.index.Mapping() +} + +func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { + return &KVSFSMSnapshot{ + index: f.index, + logger: f.logger, + }, nil +} + +func (f *RaftFSM) Restore(rc io.ReadCloser) error { + start := time.Now() + + f.logger.Info("start to restore items") + + defer func() { + err := rc.Close() + if err != nil { + f.logger.Error("failed to close reader", zap.Error(err)) + } + }() + + data, err := ioutil.ReadAll(rc) + if err != nil { + f.logger.Error("failed to open reader", zap.Error(err)) + return err + } + + count := uint64(0) + + buff := proto.NewBuffer(data) + for { + doc := &protobuf.Document{} + err = buff.DecodeMessage(doc) + if err == io.ErrUnexpectedEOF { + f.logger.Debug("reached the EOF", zap.Error(err)) + break + } + if err != nil { + f.logger.Error("failed to read document", zap.Error(err)) + return err + } + + var fields map[string]interface{} + if err := json.Unmarshal(doc.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal fields bytes to map", zap.Error(err)) + continue + } + + // apply item to store + if err = f.index.Index(doc.Id, fields); err != nil { + f.logger.Error("failed to index document", zap.Error(err)) + continue + } + + f.logger.Debug("document restored", zap.String("id", doc.Id)) + count = count + 1 + } + + f.logger.Info("finished to restore items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +// --------------------- + +type KVSFSMSnapshot struct { + index *storage.Index + logger *zap.Logger +} + +func (f *KVSFSMSnapshot) Persist(sink raft.SnapshotSink) error { + start := time.Now() + + f.logger.Info("start to 
persist items") + + defer func() { + if err := sink.Close(); err != nil { + f.logger.Error("failed to close sink", zap.Error(err)) + } + }() + + ch := f.index.SnapshotItems() + + count := uint64(0) + + for { + doc := <-ch + if doc == nil { + f.logger.Debug("channel closed") + break + } + + count = count + 1 + + buff := proto.NewBuffer([]byte{}) + if err := buff.EncodeMessage(doc); err != nil { + f.logger.Error("failed to encode document", zap.Error(err)) + return err + } + + if _, err := sink.Write(buff.Bytes()); err != nil { + f.logger.Error("failed to write document", zap.Error(err)) + return err + } + } + + f.logger.Info("finished to persist items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +func (f *KVSFSMSnapshot) Release() { + f.logger.Info("release") +} diff --git a/server/raft_fsm_test.go b/server/raft_fsm_test.go new file mode 100644 index 0000000..865f623 --- /dev/null +++ b/server/raft_fsm_test.go @@ -0,0 +1,743 @@ +package server + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftFSM_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == 
nil { + t.Fatal("failed to create index") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if ret := fsm.delete(id); ret != nil { + t.Fatal("failed to delete document") + } + + f, err = fsm.get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_SetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_GetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := 
mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_DeleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to set metadata") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if ret := fsm.deleteMetadata(id); ret != nil { + t.Fatal("failed to delete metadata") + } + + m = fsm.getMetadata(id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := 
fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyJoin(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + data := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + event := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: dataAny, + } + + eventData, err := proto.Marshal(event) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + raftLog := &raft.Log{ + Data: eventData, + } + + ret := fsm.Apply(raftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(data.Id) + if data.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", data.Metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyLeave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 
30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + // apply join + setData := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + joinEvent := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: setDataAny, + } + + joinEventData, err := proto.Marshal(joinEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + joinRaftLog := &raft.Log{ + Data: joinEventData, + } + + ret := fsm.Apply(joinRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(setData.Id) + if setData.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", setData.Metadata.GrpcAddress, m.GrpcAddress) + } + + // apply leave + deleteData := &protobuf.DeleteMetadataRequest{ + Id: "node1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + leaveEvent := &protobuf.Event{ + Type: protobuf.Event_Leave, + Data: deleteDataAny, + } + + leaveEventData, err := proto.Marshal(leaveEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + leaveRaftLog := &raft.Log{ + Data: leaveEventData, + } + + ret = fsm.Apply(leaveRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m = fsm.getMetadata(deleteData.Id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplySet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir 
:= util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + 
t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + // apply delete + deleteData := &protobuf.DeleteRequest{ + Id: "1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + deleteEvent := &protobuf.Event{ + Type: protobuf.Event_Delete, + Data: deleteDataAny, + } + + deleteEventData, err := proto.Marshal(deleteEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + deleteRaftLog := &raft.Log{ + Data: deleteEventData, + } + + ret = fsm.Apply(deleteRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err = fsm.get(deleteData.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git 
a/server/raft_server.go b/server/raft_server.go new file mode 100644 index 0000000..ba27747 --- /dev/null +++ b/server/raft_server.go @@ -0,0 +1,857 @@ +package server + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "time" + + raftbadgerdb "github.com/bbva/raft-badger" + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" + "github.com/dgraph-io/badger/v2" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type RaftServer struct { + id string + raftAddress string + dataDirectory string + bootstrap bool + logger *zap.Logger + + fsm *RaftFSM + + transport *raft.NetworkTransport + raft *raft.Raft + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} + + applyCh chan *protobuf.Event +} + +func NewRaftServer(id string, raftAddress string, dataDirectory string, indexMapping *mapping.IndexMappingImpl, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { + indexPath := filepath.Join(dataDirectory, "index") + fsm, err := NewRaftFSM(indexPath, indexMapping, logger) + if err != nil { + logger.Error("failed to create FSM", zap.String("index_path", indexPath), zap.Error(err)) + return nil, err + } + + return &RaftServer{ + id: id, + raftAddress: raftAddress, + dataDirectory: dataDirectory, + bootstrap: bootstrap, + fsm: fsm, + logger: logger, + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (s *RaftServer) Start() error { + config := raft.DefaultConfig() + config.LocalID = raft.ServerID(s.id) + config.SnapshotThreshold = 1024 + config.LogOutput = ioutil.Discard + + addr, err := net.ResolveTCPAddr("tcp", s.raftAddress) + if err != nil { + s.logger.Error("failed to 
resolve TCP address", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + s.transport, err = raft.NewTCPTransport(s.raftAddress, addr, 3, 10*time.Second, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create TCP transport", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + // create snapshot store + snapshotStore, err := raft.NewFileSnapshotStore(s.dataDirectory, 2, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create file snapshot store", zap.String("path", s.dataDirectory), zap.Error(err)) + return err + } + + logStorePath := filepath.Join(s.dataDirectory, "raft", "log") + err = os.MkdirAll(logStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + logStoreBadgerOpts := badger.DefaultOptions(logStorePath) + logStoreBadgerOpts.ValueDir = logStorePath + logStoreBadgerOpts.SyncWrites = false + logStoreBadgerOpts.Logger = nil + logStoreOpts := raftbadgerdb.Options{ + Path: logStorePath, + BadgerOptions: &logStoreBadgerOpts, + } + raftLogStore, err := raftbadgerdb.New(logStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + stableStorePath := filepath.Join(s.dataDirectory, "raft", "stable") + err = os.MkdirAll(stableStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + stableStoreBadgerOpts := badger.DefaultOptions(stableStorePath) + stableStoreBadgerOpts.ValueDir = stableStorePath + stableStoreBadgerOpts.SyncWrites = false + stableStoreBadgerOpts.Logger = nil + stableStoreOpts := raftbadgerdb.Options{ + Path: stableStorePath, + BadgerOptions: &stableStoreBadgerOpts, + } + raftStableStore, err := raftbadgerdb.New(stableStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + // create raft + s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftStableStore, snapshotStore, s.transport) + if err != nil { + s.logger.Error("failed to create raft", zap.Any("config", config), 
zap.Error(err)) + return err + } + + if s.bootstrap { + configuration := raft.Configuration{ + Servers: []raft.Server{ + { + ID: config.LocalID, + Address: s.transport.LocalAddr(), + }, + }, + } + s.raft.BootstrapCluster(configuration) + } + + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("Raft server started", zap.String("raft_address", s.raftAddress)) + return nil +} + +func (s *RaftServer) Stop() error { + s.applyCh <- nil + s.logger.Info("apply channel has closed") + + s.stopWatchCluster() + + if err := s.fsm.Close(); err != nil { + s.logger.Error("failed to close FSM", zap.Error(err)) + } + s.logger.Info("Raft FSM Closed") + + if future := s.raft.Shutdown(); future.Error() != nil { + s.logger.Info("failed to shutdown Raft", zap.Error(future.Error())) + } + s.logger.Info("Raft has shutdown", zap.String("raft_address", s.raftAddress)) + + return nil +} + +func (s *RaftServer) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.WaitForDetectLeader(timeout); err != nil { + if err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case <-s.raft.LeaderCh(): + s.logger.Info("became a leader", zap.String("leaderAddr", string(s.raft.Leader()))) + case event := <-s.fsm.applyCh: + s.applyCh <- event + case <-ticker.C: + raftStats := s.raft.Stats() + + switch raftStats["state"] { + case "Follower": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Follower)) + case "Candidate": + 
metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Candidate)) + case "Leader": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Leader)) + case "Shutdown": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Shutdown)) + } + + if term, err := strconv.ParseFloat(raftStats["term"], 64); err == nil { + metric.RaftTermMetric.WithLabelValues(s.id).Set(term) + } + + if lastLogIndex, err := strconv.ParseFloat(raftStats["last_log_index"], 64); err == nil { + metric.RaftLastLogIndexMetric.WithLabelValues(s.id).Set(lastLogIndex) + } + + if lastLogTerm, err := strconv.ParseFloat(raftStats["last_log_term"], 64); err == nil { + metric.RaftLastLogTermMetric.WithLabelValues(s.id).Set(lastLogTerm) + } + + if commitIndex, err := strconv.ParseFloat(raftStats["commit_index"], 64); err == nil { + metric.RaftCommitIndexMetric.WithLabelValues(s.id).Set(commitIndex) + } + + if appliedIndex, err := strconv.ParseFloat(raftStats["applied_index"], 64); err == nil { + metric.RaftAppliedIndexMetric.WithLabelValues(s.id).Set(appliedIndex) + } + + if fsmPending, err := strconv.ParseFloat(raftStats["fsm_pending"], 64); err == nil { + metric.RaftFsmPendingMetric.WithLabelValues(s.id).Set(fsmPending) + } + + if lastSnapshotIndex, err := strconv.ParseFloat(raftStats["last_snapshot_index"], 64); err == nil { + metric.RaftLastSnapshotIndexMetric.WithLabelValues(s.id).Set(lastSnapshotIndex) + } + + if lastSnapshotTerm, err := strconv.ParseFloat(raftStats["last_snapshot_term"], 64); err == nil { + metric.RaftLastSnapshotTermMetric.WithLabelValues(s.id).Set(lastSnapshotTerm) + } + + if latestConfigurationIndex, err := strconv.ParseFloat(raftStats["latest_configuration_index"], 64); err == nil { + metric.RaftLatestConfigurationIndexMetric.WithLabelValues(s.id).Set(latestConfigurationIndex) + } + + if numPeers, err := strconv.ParseFloat(raftStats["num_peers"], 64); err == nil { + metric.RaftNumPeersMetric.WithLabelValues(s.id).Set(numPeers) + } + + if lastContact, err 
:= strconv.ParseFloat(raftStats["last_contact"], 64); err == nil { + metric.RaftLastContactMetric.WithLabelValues(s.id).Set(lastContact) + } + + if nodes, err := s.Nodes(); err == nil { + metric.RaftNumNodesMetric.WithLabelValues(s.id).Set(float64(len(nodes))) + } + + indexStats := s.fsm.Stats() + + tmpIndex := indexStats["index"].(map[string]interface{}) + + metric.IndexCurOnDiskBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskBytes"].(uint64))) + + metric.IndexCurOnDiskFilesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskFiles"].(uint64))) + + metric.IndexCurRootEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurRootEpoch"].(uint64))) + + metric.IndexLastMergedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastMergedEpoch"].(uint64))) + + metric.IndexLastPersistedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastPersistedEpoch"].(uint64))) + + metric.IndexMaxBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxBatchIntroTime"].(uint64))) + + metric.IndexMaxFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxFileMergeZapTime"].(uint64))) + + metric.IndexMaxMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxMemMergeZapTime"].(uint64))) + + metric.IndexTotAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotAnalysisTime"].(uint64))) + + metric.IndexTotBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchIntroTime"].(uint64))) + + metric.IndexTotBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatches"].(uint64))) + + metric.IndexTotBatchesEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchesEmpty"].(uint64))) + + metric.IndexTotDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotDeletes"].(uint64))) + + metric.IndexTotFileMergeIntroductionsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductions"].(uint64))) + + 
metric.IndexTotFileMergeIntroductionsDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsDone"].(uint64))) + + metric.IndexTotFileMergeIntroductionsSkippedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsSkipped"].(uint64))) + + metric.IndexTotFileMergeLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopBeg"].(uint64))) + + metric.IndexTotFileMergeLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopEnd"].(uint64))) + + metric.IndexTotFileMergeLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopErr"].(uint64))) + + metric.IndexTotFileMergePlanMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlan"].(uint64))) + + metric.IndexTotFileMergePlanErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanErr"].(uint64))) + + metric.IndexTotFileMergePlanNoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanNone"].(uint64))) + + metric.IndexTotFileMergePlanOkMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanOk"].(uint64))) + + metric.IndexTotFileMergePlanTasksMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasks"].(uint64))) + + metric.IndexTotFileMergePlanTasksDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksDone"].(uint64))) + + metric.IndexTotFileMergePlanTasksErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksErr"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegments"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegments"].(uint64))) + + 
metric.IndexTotFileMergeSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeWrittenBytes"].(uint64))) + + metric.IndexTotFileMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapBeg"].(uint64))) + + metric.IndexTotFileMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapEnd"].(uint64))) + + metric.IndexTotFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapTime"].(uint64))) + + metric.IndexTotFileSegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileSegmentsAtRoot"].(uint64))) + + metric.IndexTotIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexTime"].(uint64))) + + metric.IndexTotIndexedPlainTextBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexedPlainTextBytes"].(uint64))) + + metric.IndexTotIntroduceLoopMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceLoop"].(uint64))) + + metric.IndexTotIntroduceMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeBeg"].(uint64))) + + metric.IndexTotIntroduceMergeEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeEnd"].(uint64))) + + metric.IndexTotIntroducePersistBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistBeg"].(uint64))) + + metric.IndexTotIntroducePersistEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistEnd"].(uint64))) + + metric.IndexTotIntroduceRevertBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertBeg"].(uint64))) + + metric.IndexTotIntroduceRevertEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertEnd"].(uint64))) + + metric.IndexTotIntroduceSegmentBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentBeg"].(uint64))) + + 
metric.IndexTotIntroduceSegmentEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentEnd"].(uint64))) + + metric.IndexTotIntroducedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedItems"].(uint64))) + + metric.IndexTotIntroducedSegmentsBatchMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsBatch"].(uint64))) + + metric.IndexTotIntroducedSegmentsMergeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsMerge"].(uint64))) + + metric.IndexTotItemsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotItemsToPersist"].(uint64))) + + metric.IndexTotMemMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeBeg"].(uint64))) + + metric.IndexTotMemMergeDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeDone"].(uint64))) + + metric.IndexTotMemMergeErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeErr"].(uint64))) + + metric.IndexTotMemMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeSegments"].(uint64))) + + metric.IndexTotMemMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapBeg"].(uint64))) + + metric.IndexTotMemMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapEnd"].(uint64))) + + metric.IndexTotMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapTime"].(uint64))) + + metric.IndexTotMemorySegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemorySegmentsAtRoot"].(uint64))) + + metric.IndexTotOnErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotOnErrors"].(uint64))) + + metric.IndexTotPersistLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopBeg"].(uint64))) + + metric.IndexTotPersistLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopEnd"].(uint64))) + + metric.IndexTotPersistLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopErr"].(uint64))) + + 
metric.IndexTotPersistLoopProgressMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopProgress"].(uint64))) + + metric.IndexTotPersistLoopWaitMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWait"].(uint64))) + + metric.IndexTotPersistLoopWaitNotifiedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWaitNotified"].(uint64))) + + metric.IndexTotPersistedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedItems"].(uint64))) + + metric.IndexTotPersistedSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedSegments"].(uint64))) + + metric.IndexTotPersisterMergerNapBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterMergerNapBreak"].(uint64))) + + metric.IndexTotPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterNapPauseCompleted"].(uint64))) + + metric.IndexTotPersisterSlowMergerPauseMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerPause"].(uint64))) + + metric.IndexTotPersisterSlowMergerResumeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerResume"].(uint64))) + + metric.IndexTotTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersFinished"].(uint64))) + + metric.IndexTotTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersStarted"].(uint64))) + + metric.IndexTotUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotUpdates"].(uint64))) + + metric.IndexAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["analysis_time"].(uint64))) + + metric.IndexBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["batches"].(uint64))) + + metric.IndexDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["deletes"].(uint64))) + + metric.IndexErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["errors"].(uint64))) + + metric.IndexIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["index_time"].(uint64))) + 
+ metric.IndexNumBytesUsedDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_bytes_used_disk"].(uint64))) + + metric.IndexNumFilesOnDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_files_on_disk"].(uint64))) + + metric.IndexNumItemsIntroducedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_introduced"].(uint64))) + + metric.IndexNumItemsPersistedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_persisted"].(uint64))) + + metric.IndexNumPersisterNapMergerBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_merger_break"].(uint64))) + + metric.IndexNumPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_pause_completed"].(uint64))) + + metric.IndexNumPlainTextBytesIndexedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_plain_text_bytes_indexed"].(uint64))) + + metric.IndexNumRecsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_recs_to_persist"].(uint64))) + + metric.IndexNumRootFilesegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_filesegments"].(uint64))) + + metric.IndexNumRootMemorysegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_memorysegments"].(uint64))) + + metric.IndexTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_finished"].(uint64))) + + metric.IndexTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_started"].(uint64))) + + metric.IndexTotalCompactionWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["total_compaction_written_bytes"].(uint64))) + + metric.IndexUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["updates"].(uint64))) + + metric.SearchTimeMetric.WithLabelValues(s.id).Set(float64(indexStats["search_time"].(uint64))) + + metric.SearchesMetric.WithLabelValues(s.id).Set(float64(indexStats["searches"].(uint64))) + } + } +} + +func (s *RaftServer) stopWatchCluster() { + if s.watchClusterStopCh != nil 
{ + s.logger.Info("send a request to stop updating a cluster") + close(s.watchClusterStopCh) + } + + s.logger.Info("wait for the cluster update to stop") + <-s.watchClusterDoneCh + s.logger.Info("the cluster update has been stopped") +} + +func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case <-ticker.C: + leaderAddr := s.raft.Leader() + if leaderAddr != "" { + s.logger.Debug("detected a leader address", zap.String("raft_address", string(leaderAddr))) + return leaderAddr, nil + } + case <-timer.C: + err := errors.ErrTimeout + s.logger.Error("failed to detect leader address", zap.Error(err)) + return "", err + } + } +} + +func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { + leaderAddr, err := s.LeaderAddress(timeout) + if err != nil { + s.logger.Error("failed to get leader address", zap.Error(err)) + return "", err + } + + cf := s.raft.GetConfiguration() + if err = cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return "", err + } + + for _, server := range cf.Configuration().Servers { + if server.Address == leaderAddr { + s.logger.Info("detected a leader ID", zap.String("id", string(server.ID))) + return server.ID, nil + } + } + + err = errors.ErrNotFoundLeader + s.logger.Error("failed to detect leader ID", zap.Error(err)) + return "", err +} + +func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { + if _, err := s.LeaderAddress(timeout); err != nil { + s.logger.Error("failed to wait for detect leader", zap.Error(err)) + return err + } + + return nil +} + +func (s *RaftServer) State() raft.RaftState { + return s.raft.State() +} + +func (s *RaftServer) StateStr() string { + return s.State().String() +} + +func (s *RaftServer) Exist(id string) (bool, error) { + exist := false + + cf := 
s.raft.GetConfiguration() + if err := cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return false, err + } + + for _, server := range cf.Configuration().Servers { + if server.ID == raft.ServerID(id) { + s.logger.Debug("node already joined the cluster", zap.String("id", id)) + exist = true + break + } + } + + return exist, nil +} + +func (s *RaftServer) getMetadata(id string) (*protobuf.Metadata, error) { + metadata := s.fsm.getMetadata(id) + if metadata == nil { + return nil, errors.ErrNotFound + } + + return metadata, nil +} + +func (s *RaftServer) setMetadata(id string, metadata *protobuf.Metadata) error { + data := &protobuf.SetMetadataRequest{ + Id: id, + Metadata: metadata, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal the command into the bytes as message", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) deleteMetadata(id string) error { + data := &protobuf.DeleteMetadataRequest{ + Id: id, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Leave, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil 
{ + s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("id", id), zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Join(id string, node *protobuf.Node) error { + exist, err := s.Exist(id) + if err != nil { + return err + } + + if !exist { + if future := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(node.RaftAddress), 0, 0); future.Error() != nil { + s.logger.Error("failed to add voter", zap.String("id", id), zap.String("raft_address", node.RaftAddress), zap.Error(future.Error())) + return future.Error() + } + s.logger.Info("node has successfully joined", zap.String("id", id), zap.String("raft_address", node.RaftAddress)) + } + + if err := s.setMetadata(id, node.Metadata); err != nil { + return err + } + + if exist { + return errors.ErrNodeAlreadyExists + } + + return nil +} + +func (s *RaftServer) Leave(id string) error { + exist, err := s.Exist(id) + if err != nil { + return err + } + + if exist { + if future := s.raft.RemoveServer(raft.ServerID(id), 0, 0); future.Error() != nil { + s.logger.Error("failed to remove server", zap.String("id", id), zap.Error(future.Error())) + return future.Error() + } + s.logger.Info("node has successfully left", zap.String("id", id)) + } + + if err = s.deleteMetadata(id); err != nil { + return err + } + + if !exist { + return errors.ErrNodeDoesNotExist + } + + return nil +} + +func (s *RaftServer) Node() (*protobuf.Node, error) { + nodes, err := s.Nodes() + if err != nil { + return nil, err + } + + node, ok := nodes[s.id] + if !ok { + return nil, errors.ErrNotFound + } + + node.State = s.StateStr() + + return node, nil +} + +func (s *RaftServer) Nodes() (map[string]*protobuf.Node, error) { + cf := s.raft.GetConfiguration() + if 
err := cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return nil, err + } + + nodes := make(map[string]*protobuf.Node, 0) + for _, server := range cf.Configuration().Servers { + metadata, _ := s.getMetadata(string(server.ID)) + + nodes[string(server.ID)] = &protobuf.Node{ + RaftAddress: string(server.Address), + Metadata: metadata, + } + } + + return nodes, nil +} + +func (s *RaftServer) Snapshot() error { + if future := s.raft.Snapshot(); future.Error() != nil { + s.logger.Error("failed to snapshot", zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Get(id string) (map[string]interface{}, error) { + return s.fsm.get(id) +} + +func (s *RaftServer) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + return s.fsm.search(searchRequest) +} + +func (s *RaftServer) Set(req *protobuf.SetRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal document map to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Delete(req *protobuf.DeleteRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal id to any", zap.Error(err)) + return err + } + + c := &protobuf.Event{ + Type: protobuf.Event_Delete, + Data: dataAny, + } + + msg, err := proto.Marshal(c) + if err != nil { + s.logger.Error("failed to marshal event to 
bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkIndex(req *protobuf.BulkIndexRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal bulk index request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkIndex, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkDelete(req *protobuf.BulkDeleteRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal set request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkDelete, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Mapping() (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + m := s.fsm.Mapping() + + fieldsBytes, err := json.Marshal(m) + if err != nil { + s.logger.Error("failed to 
marshal mapping to bytes", zap.Error(err)) + return resp, err + } + + resp.Mapping = fieldsBytes + + return resp, nil +} diff --git a/server/raft_server_test.go b/server/raft_server_test.go new file mode 100644 index 0000000..11a1b65 --- /dev/null +++ b/server/raft_server_test.go @@ -0,0 +1,1536 @@ +package server + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftServer_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + time.Sleep(10 * time.Second) +} + +func Test_RaftServer_LeaderAddress(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + 
+ if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderAddress, err := server.LeaderAddress(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != string(leaderAddress) { + t.Fatalf("expected content to see %v, saw %v", raftAddress, string(leaderAddress)) + } +} + +func Test_RaftServer_LeaderID(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderId, err := server.LeaderID(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if id != string(leaderId) { + t.Fatalf("expected content to see %v, saw %v", id, string(leaderId)) + } +} + +func Test_RaftServer_WaitForDetectLeader(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + 
if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_State(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.State() + if raft.Leader != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader, state) + } +} + +func Test_RaftServer_StateStr(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.StateStr() + if raft.Leader.String() != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), state) + } +} + +func Test_RaftServer_Exist(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + exist, err := server.Exist(id) + if err != nil { + t.Fatalf("%v", err) + } + if !exist { + t.Fatalf("expected content to see %v, saw %v", true, exist) + } + + exist, err = server.Exist("non-existent-id") + if err != nil { + t.Fatalf("%v", err) + } + if exist { + t.Fatalf("expected content to see %v, saw %v", false, exist) + } +} + +func Test_RaftServer_setMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + 
t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_getMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, 
m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } +} + +func Test_RaftServer_deleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + // set + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + // get + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } + + // delete + if err := server.deleteMetadata(id); err != nil { + t.Fatalf("%v", err) + } + + //get + m, err = server.getMetadata(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatalf("%v", err) + } + } + if err == nil { + t.Fatalf("expected content to see %v, saw %v", nil, err) + } +} + +func Test_RaftServer_Join(t 
*testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } +} + +func Test_RaftServer_Node(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + 
t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + n, err := server.Node() + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != n.RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, n.RaftAddress) + } + if grpcAddress != n.Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, n.Metadata.GrpcAddress) + } + if httpAddress != n.Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, n.Metadata.HttpAddress) + } +} + +func Test_RaftServer_Cluster(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: 
raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := 
server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + 
if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + 
if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } +} + +func Test_RaftServer_Leave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if 
err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != 
ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, 
ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } + + if err := server.Leave(id3); err != nil { + t.Fatalf("%v", err) + } + + ns, err = server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if _, ok := ns[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } + + time.Sleep(3 * time.Second) + + ns2, err = server2.Nodes() + if 
err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if _, ok := ns2[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } +} + +func Test_RaftServer_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": 
"Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } +} + +func Test_RaftServer_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a 
computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } + + deleteReq1 := &protobuf.DeleteRequest{ + Id: docId1, + } + + if err := server.Delete(deleteReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err = server.Get(docId1) + if err != nil { + switch err { + case errors.ErrNotFound: + //ok + default: + t.Fatalf("%v", err) + } + } + if f1 != nil { + t.Fatalf("expected content to see %v, saw %v", nil, f1) + } +} + +func Test_RaftServer_Snapshot(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if 
err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + if err := server.Snapshot(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/sortutils/sort.go b/sortutils/sort.go deleted file mode 100644 index 9f41b7f..0000000 --- a/sortutils/sort.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sortutils - -import ( - "github.com/blevesearch/bleve/search" -) - -type MultiSearchHitSorter struct { - hits search.DocumentMatchCollection - sort search.SortOrder - cachedScoring []bool - cachedDesc []bool -} - -func NewMultiSearchHitSorter(sort search.SortOrder, hits search.DocumentMatchCollection) *MultiSearchHitSorter { - return &MultiSearchHitSorter{ - sort: sort, - hits: hits, - cachedScoring: sort.CacheIsScore(), - cachedDesc: sort.CacheDescending(), - } -} - -func (m *MultiSearchHitSorter) Len() int { - return len(m.hits) -} - -func (m *MultiSearchHitSorter) Swap(i, j int) { - m.hits[i], m.hits[j] = m.hits[j], m.hits[i] -} - -func (m *MultiSearchHitSorter) Less(i, j int) bool { - c := m.sort.Compare(m.cachedScoring, m.cachedDesc, m.hits[i], m.hits[j]) - - return c < 0 -} diff --git a/storage/index.go b/storage/index.go new file mode 100644 index 0000000..da50a0b --- /dev/null +++ b/storage/index.go @@ -0,0 +1,269 @@ +package storage + +import ( + "os" + "time" + + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/document" + "github.com/blevesearch/bleve/index/scorch" + "github.com/blevesearch/bleve/mapping" + _ "github.com/mosuka/blast/builtin" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type Index struct { + indexMapping *mapping.IndexMappingImpl + logger *zap.Logger + + index bleve.Index +} + +func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*Index, error) { + var index bleve.Index + + if _, err := os.Stat(dir); os.IsNotExist(err) { + // create new index + index, err = bleve.NewUsing(dir, indexMapping, scorch.Name, scorch.Name, nil) + if err != nil { + logger.Error("failed to create index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } else { + // open existing index + index, err = bleve.OpenUsing(dir, map[string]interface{}{ + "create_if_missing": false, + "error_if_exists": false, + }) + if err != nil { + 
logger.Error("failed to open index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } + + return &Index{ + index: index, + indexMapping: indexMapping, + logger: logger, + }, nil +} + +func (i *Index) Close() error { + if err := i.index.Close(); err != nil { + i.logger.Error("failed to close index", zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) Get(id string) (map[string]interface{}, error) { + d, err := i.index.Document(id) + if err != nil { + i.logger.Error("failed to get document", zap.String("id", id), zap.Error(err)) + return nil, err + } + if d == nil { + err := errors.ErrNotFound + i.logger.Debug("document does not found", zap.String("id", id), zap.Error(err)) + return nil, err + } + + fields := make(map[string]interface{}, 0) + for _, f := range d.Fields { + var v interface{} + switch field := f.(type) { + case *document.TextField: + v = string(field.Value()) + case *document.NumericField: + n, err := field.Number() + if err == nil { + v = n + } + case *document.DateTimeField: + d, err := field.DateTime() + if err == nil { + v = d.Format(time.RFC3339Nano) + } + } + existing, existed := fields[f.Name()] + if existed { + switch existing := existing.(type) { + case []interface{}: + fields[f.Name()] = append(existing, v) + case interface{}: + arr := make([]interface{}, 2) + arr[0] = existing + arr[1] = v + fields[f.Name()] = arr + } + } else { + fields[f.Name()] = v + } + } + + return fields, nil +} + +func (i *Index) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + searchResult, err := i.index.Search(searchRequest) + if err != nil { + i.logger.Error("failed to search documents", zap.Any("search_request", searchRequest), zap.Error(err)) + return nil, err + } + + return searchResult, nil +} + +func (i *Index) Index(id string, fields map[string]interface{}) error { + if err := i.index.Index(id, fields); err != nil { + i.logger.Error("failed to index document", zap.String("id", id), zap.Error(err)) 
+ return err + } + + return nil +} + +func (i *Index) Delete(id string) error { + if err := i.index.Delete(id); err != nil { + i.logger.Error("failed to delete document", zap.String("id", id), zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) BulkIndex(docs []map[string]interface{}) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, doc := range docs { + id, ok := doc["id"].(string) + if !ok { + err := errors.ErrNil + i.logger.Error("missing id", zap.Error(err)) + continue + } + fields, ok := doc["fields"].(map[string]interface{}) + if !ok { + err := errors.ErrNil + i.logger.Error("missing fields", zap.Error(err)) + continue + } + + if err := batch.Index(id, fields); err != nil { + i.logger.Error("failed to index document in batch", zap.String("id", id), zap.Error(err)) + continue + } + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to index documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + if count <= 0 { + err := errors.ErrNoUpdate + i.logger.Error("no documents updated", zap.Any("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) BulkDelete(ids []string) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, id := range ids { + batch.Delete(id) + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to delete documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) Mapping() *mapping.IndexMappingImpl { + return i.indexMapping +} + +func (i *Index) Stats() map[string]interface{} { + return i.index.StatsMap() +} + +func (i *Index) SnapshotItems() <-chan *protobuf.Document { + ch := make(chan *protobuf.Document, 1024) + + go func() { + idx, _, err := i.index.Advanced() + if err != nil { + i.logger.Error("failed to get index", zap.Error(err)) + return + } + + ir, err := idx.Reader() + if err != nil 
{ + i.logger.Error("failed to get index reader", zap.Error(err)) + return + } + + docCount := 0 + + dr, err := ir.DocIDReaderAll() + if err != nil { + i.logger.Error("failed to get doc ID reader", zap.Error(err)) + return + } + for { + //if dr == nil { + // i.logger.Error(err.Error()) + // break + //} + id, err := dr.Next() + if id == nil { + i.logger.Debug("finished to read all document IDs") + break + } else if err != nil { + i.logger.Warn("failed to get doc ID", zap.Error(err)) + continue + } + + // get original document + fieldsBytes, err := i.index.GetInternal(id) + if err != nil { + i.logger.Warn("failed to get doc fields bytes", zap.String("id", string(id)), zap.Error(err)) + continue + } + + doc := &protobuf.Document{ + Id: string(id), + Fields: fieldsBytes, + } + + ch <- doc + + docCount = docCount + 1 + } + + i.logger.Debug("finished to write all documents to channel") + ch <- nil + + i.logger.Info("finished to snapshot", zap.Int("count", docCount)) + + return + }() + + return ch +} diff --git a/storage/index_test.go b/storage/index_test.go new file mode 100644 index 0000000..72bd723 --- /dev/null +++ b/storage/index_test.go @@ -0,0 +1,341 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func TestClose(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create 
index") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestGet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + f, err := index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + fields, err = index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + + if err := index.Delete(id); err != nil { + t.Fatal("failed to delete document") + } + + fields, err = index.Get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. %d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. 
%d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + ids := make([]string, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + + ids = append(ids, id) + } + + count, err = index.BulkDelete(ids) + if err != nil { + t.Fatal("failed to delete documents") + } + if count <= 0 { + t.Fatal("failed to delete documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/strutils/strutils.go b/strutils/strutils.go deleted file mode 100644 index 4ea086d..0000000 --- a/strutils/strutils.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package strutils - -import ( - "math/rand" - "time" -) - -var randSrc = rand.NewSource(time.Now().UnixNano()) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - letterIdxBits = 6 - letterIdxMask = 1<= 0; { - if remain == 0 { - cache, remain = randSrc.Int63(), letterIdxMax - } - idx := int(cache & letterIdxMask) - if idx < len(letters) { - b[i] = letters[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - - return string(b) -} diff --git a/testutils/testutils.go b/testutils/testutils.go deleted file mode 100644 index 9e0ec2c..0000000 --- a/testutils/testutils.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutils - -import ( - "io/ioutil" - "net" -) - -func TmpDir() string { - tmp, _ := ioutil.TempDir("", "") - return tmp -} - -func TmpPort() int { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return -1 - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return -1 - } - - defer func() { - _ = l.Close() - }() - - return l.Addr().(*net.TCPAddr).Port -} diff --git a/util/temp.go b/util/temp.go new file mode 100644 index 0000000..8f3208f --- /dev/null +++ b/util/temp.go @@ -0,0 +1,29 @@ +package util + +import ( + "io/ioutil" + "net" +) + +func TmpDir() string { + tmp, _ := ioutil.TempDir("", "") + return tmp +} + +func TmpPort() int { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return -1 + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return -1 + } + + defer func() { + _ = l.Close() + }() + + return l.Addr().(*net.TCPAddr).Port +} diff --git a/version/version.go b/version/version.go index 328268a..1895fc9 100644 --- a/version/version.go +++ b/version/version.go @@ -1,17 +1,3 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package version var ( From 3be0cde18f51d1566b3af305043a01d249922007 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 17:07:44 +0900 Subject: [PATCH 50/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index b660b65..9327f96 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +- Delete the experimentally implemented feature for distributed search #127 - Add coverage to Makefile #114 - Docker compose #119 - Bump Bleve version to v0.8.1 #117 From 75b695a8fefe03ae9bf1a67a09e78b65a6f23b17 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 18:10:21 +0900 Subject: [PATCH 51/76] Implement CORS (#128) --- cmd/start.go | 12 +++++++++- cmd/variables.go | 39 ++++++++++++++++-------------- etc/blast.yaml | 27 +++++++++++++++++++++ go.mod | 3 ++- go.sum | 2 ++ server/grpc_gateway.go | 54 +++++++++++++++++++++++++++++++++--------- 6 files changed, 106 insertions(+), 31 deletions(-) diff --git a/cmd/start.go b/cmd/start.go index 8f4e6ca..1f796a7 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -37,6 +37,10 @@ var ( keyFile = viper.GetString("key_file") commonName = viper.GetString("common_name") + corsAllowedMethods = viper.GetStringSlice("cors_allowed_methods") + corsAllowedOrigins = viper.GetStringSlice("cors_allowed_origins") + corsAllowedHeaders = viper.GetStringSlice("cors_allowed_headers") + logLevel = viper.GetString("log_level") logFile = viper.GetString("log_file") logMaxSize = viper.GetInt("log_max_size") @@ -73,7 +77,7 @@ var ( return err } - grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, logger) + grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, corsAllowedMethods, corsAllowedOrigins, corsAllowedHeaders, logger) if err != nil { return err } @@ -186,6 +190,9 @@ func init() 
{ startCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") startCmd.PersistentFlags().StringVar(&keyFile, "key-file", "", "path to the client server TLS key file") startCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedMethods, "cors-allowed-methods", []string{}, "CORS allowed methods (ex: GET,PUT,DELETE,POST)") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedOrigins, "cors-allowed-origins", []string{}, "CORS allowed origins (ex: http://localhost:8080,http://localhost:80)") + startCmd.PersistentFlags().StringSliceVar(&corsAllowedHeaders, "cors-allowed-headers", []string{}, "CORS allowed headers (ex: content-type,x-some-key)") startCmd.PersistentFlags().StringVar(&logLevel, "log-level", "INFO", "log level") startCmd.PersistentFlags().StringVar(&logFile, "log-file", os.Stderr.Name(), "log file") startCmd.PersistentFlags().IntVar(&logMaxSize, "log-max-size", 500, "max size of a log file in megabytes") @@ -203,6 +210,9 @@ func init() { _ = viper.BindPFlag("certificate_file", startCmd.PersistentFlags().Lookup("certificate-file")) _ = viper.BindPFlag("key_file", startCmd.PersistentFlags().Lookup("key-file")) _ = viper.BindPFlag("common_name", startCmd.PersistentFlags().Lookup("common-name")) + _ = viper.BindPFlag("cors_allowed_methods", startCmd.PersistentFlags().Lookup("cors-allowed-methods")) + _ = viper.BindPFlag("cors_allowed_origins", startCmd.PersistentFlags().Lookup("cors-allowed-origins")) + _ = viper.BindPFlag("cors_allowed_headers", startCmd.PersistentFlags().Lookup("cors-allowed-headers")) _ = viper.BindPFlag("log_level", startCmd.PersistentFlags().Lookup("log-level")) _ = viper.BindPFlag("log_max_size", startCmd.PersistentFlags().Lookup("log-max-size")) _ = viper.BindPFlag("log_max_backups", startCmd.PersistentFlags().Lookup("log-max-backups")) diff --git a/cmd/variables.go 
b/cmd/variables.go index 8022742..0e0ef9b 100644 --- a/cmd/variables.go +++ b/cmd/variables.go @@ -1,22 +1,25 @@ package cmd var ( - configFile string - id string - raftAddress string - grpcAddress string - httpAddress string - dataDirectory string - peerGrpcAddress string - mappingFile string - certificateFile string - keyFile string - commonName string - file string - logLevel string - logFile string - logMaxSize int - logMaxBackups int - logMaxAge int - logCompress bool + configFile string + id string + raftAddress string + grpcAddress string + httpAddress string + dataDirectory string + peerGrpcAddress string + mappingFile string + certificateFile string + keyFile string + commonName string + corsAllowedMethods []string + corsAllowedOrigins []string + corsAllowedHeaders []string + file string + logLevel string + logFile string + logMaxSize int + logMaxBackups int + logMaxAge int + logCompress bool ) diff --git a/etc/blast.yaml b/etc/blast.yaml index ab362c1..a03e89a 100644 --- a/etc/blast.yaml +++ b/etc/blast.yaml @@ -1,3 +1,6 @@ +# +# General +# id: "node1" raft_address: ":7000" grpc_address: ":9000" @@ -5,9 +8,33 @@ http_address: ":8000" data_directory: "/tmp/blast/node1/data" #mapping_file: "./etc/blast_mapping.json" peer_grpc_address: "" + +# +# TLS +# #certificate_file: "./etc/blast-cert.pem" #key_file: "./etc/blast-key.pem" #common_name: "localhost" + +# +# CORS +# +#cors_allowed_methods: [ +# "GET", +# "PUT", +# "DELETE", +# "POST" +#] +#cors_allowed_origins: [ +# "http://localhost:8080" +#] +#cors_allowed_headers: [ +# "content-type" +#] + +# +# Logging +# log_level: "INFO" log_file: "" #log_max_size: 500 diff --git a/go.mod b/go.mod index a218f2c..0a3a8dd 100644 --- a/go.mod +++ b/go.mod @@ -23,8 +23,9 @@ require ( github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect - 
github.com/gogo/protobuf v1.3.0 + github.com/gogo/protobuf v1.3.0 // indirect github.com/golang/protobuf v1.3.5 + github.com/gorilla/handlers v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.14.3 diff --git a/go.sum b/go.sum index 974344e..c714a35 100644 --- a/go.sum +++ b/go.sum @@ -138,6 +138,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= diff --git a/server/grpc_gateway.go b/server/grpc_gateway.go index c319fc0..c63572c 100644 --- a/server/grpc_gateway.go +++ b/server/grpc_gateway.go @@ -8,6 +8,7 @@ import ( "time" "github.com/golang/protobuf/proto" + "github.com/gorilla/handlers" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/mosuka/blast/marshaler" "github.com/mosuka/blast/protobuf" @@ -41,10 +42,14 @@ type GRPCGateway struct { certificateFile string keyFile string + corsAllowedMethods []string + corsAllowedOrigins []string + corsAllowedHeaders []string + logger *zap.Logger } -func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCGateway, 
error) { +func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, corsAllowedMethods []string, corsAllowedOrigins []string, corsAllowedHeaders []string, logger *zap.Logger) (*GRPCGateway, error) { dialOpts := []grpc.DialOption{ grpc.WithDefaultCallOptions( grpc.MaxCallSendMsgSize(math.MaxInt64), @@ -90,25 +95,52 @@ func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile stri } return &GRPCGateway{ - httpAddress: httpAddress, - grpcAddress: grpcAddress, - listener: listener, - mux: mux, - cancel: cancel, - certificateFile: certificateFile, - keyFile: keyFile, - logger: logger, + httpAddress: httpAddress, + grpcAddress: grpcAddress, + listener: listener, + mux: mux, + cancel: cancel, + certificateFile: certificateFile, + keyFile: keyFile, + corsAllowedMethods: corsAllowedMethods, + corsAllowedOrigins: corsAllowedOrigins, + corsAllowedHeaders: corsAllowedHeaders, + logger: logger, }, nil } func (s *GRPCGateway) Start() error { + corsOpts := make([]handlers.CORSOption, 0) + + if s.corsAllowedMethods != nil && len(s.corsAllowedMethods) > 0 { + corsOpts = append(corsOpts, handlers.AllowedMethods(s.corsAllowedMethods)) + } + if s.corsAllowedOrigins != nil && len(s.corsAllowedOrigins) > 0 { + corsOpts = append(corsOpts, handlers.AllowedOrigins(s.corsAllowedOrigins)) + } + if s.corsAllowedHeaders != nil && len(s.corsAllowedHeaders) > 0 { + corsOpts = append(corsOpts, handlers.AllowedHeaders(s.corsAllowedHeaders)) + } + + corsMux := handlers.CORS( + corsOpts..., + )(s.mux) + if s.certificateFile == "" && s.keyFile == "" { go func() { - _ = http.Serve(s.listener, s.mux) + if len(corsOpts) > 0 { + _ = http.Serve(s.listener, corsMux) + } else { + _ = http.Serve(s.listener, s.mux) + } }() } else { go func() { - _ = http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile) + if len(corsOpts) > 0 { + _ = http.ServeTLS(s.listener, corsMux, s.certificateFile, s.keyFile) + } else { + _ = 
http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile) + } }() } From 352a9d77bfcaa7710fd322fe5e0f70c4b19a1bee Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 18:10:52 +0900 Subject: [PATCH 52/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 9327f96..52cb1aa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +- Implement CORS #128 - Delete the experimentally implemented feature for distributed search #127 - Add coverage to Makefile #114 - Docker compose #119 From 4da7abeef7435cdc2065a1398eb6ab3bd709ba66 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 19:33:13 +0900 Subject: [PATCH 53/76] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 52cb1aa..0294f31 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [Unreleased] +## [v0.9.0] - Implement CORS #128 - Delete the experimentally implemented feature for distributed search #127 From 19318986df441da7a12b4f99a605d33da14663ef Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 19:37:10 +0900 Subject: [PATCH 54/76] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 0d10c58..10c68d1 100644 --- a/README.md +++ b/README.md @@ -195,6 +195,9 @@ Refer to the following table for the options that can be configured. 
| --certificate-file | BLAST_CERTIFICATE_FILE | certificate_file | path to the client server TLS certificate file | | --key-file | BLAST_KEY_FILE | key_file | path to the client server TLS key file | | --common-name | BLAST_COMMON_NAME | common_name | certificate common name | +| --cors-allowed-methods | BLAST_CORS_ALLOWED_METHODS | cors_allowed_methods | CORS allowed methods (ex: GET,PUT,DELETE,POST) | +| --cors-allowed-origins | BLAST_CORS_ALLOWED_ORIGINS | cors_allowed_origins | CORS allowed origins (ex: http://localhost:8080,http://localhost:80) | +| --cors-allowed-headers | BLAST_CORS_ALLOWED_HEADERS | cors_allowed_headers | CORS allowed headers (ex: content-type,x-some-key) | | --log-level | BLAST_LOG_LEVEL | log_level | log level | | --log-file | BLAST_LOG_FILE | log_file | log file | | --log-max-size | BLAST_LOG_MAX_SIZE | log_max_size | max size of a log file in megabytes | From 6b6493998a2a9faacba56c5a96d5f2ccb3a7180c Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Thu, 11 Jun 2020 22:42:19 +0900 Subject: [PATCH 55/76] Add test (#129) --- server/grpc_gateway_test.go | 38 ++ server/grpc_server_test.go | 72 ++++ server/grpc_service_test.go | 811 ++++++++++++++++++++++++++++++++++++ 3 files changed, 921 insertions(+) create mode 100644 server/grpc_gateway_test.go create mode 100644 server/grpc_server_test.go create mode 100644 server/grpc_service_test.go diff --git a/server/grpc_gateway_test.go b/server/grpc_gateway_test.go new file mode 100644 index 0000000..72b36ee --- /dev/null +++ b/server/grpc_gateway_test.go @@ -0,0 +1,38 @@ +package server + +import ( + "fmt" + "testing" + "time" + + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/util" +) + +func Test_GRPCGateway_Start_Stop(t *testing.T) { + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + certificateFile := "" + KeyFile := "" + commonName := "" + corsAllowedMethods := make([]string, 0) + corsAllowedOrigins := make([]string, 0) + 
corsAllowedHeaders := make([]string, 0) + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + grpcGateway, err := NewGRPCGateway(httpAddress, grpcAddress, certificateFile, KeyFile, commonName, corsAllowedMethods, corsAllowedOrigins, corsAllowedHeaders, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcGateway.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcGateway.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) +} diff --git a/server/grpc_server_test.go b/server/grpc_server_test.go new file mode 100644 index 0000000..67596d8 --- /dev/null +++ b/server/grpc_server_test.go @@ -0,0 +1,72 @@ +package server + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func Test_GRPCServer_Start_Stop(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC server + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + certificateFile := "" + keyFile := "" + commonName := "" + + grpcServer, err := NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) + if err != nil { + 
t.Fatalf("%v", err) + } + defer func() { + if err := grpcServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) +} diff --git a/server/grpc_service_test.go b/server/grpc_service_test.go new file mode 100644 index 0000000..105841f --- /dev/null +++ b/server/grpc_service_test.go @@ -0,0 +1,811 @@ +package server + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/raft" + + "github.com/mosuka/blast/protobuf" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func Test_GRPCService_Start_Stop(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) +} + +func 
Test_GRPCService_LivenessCheck(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) + + ctx := context.Background() + req := &empty.Empty{} + + resp, err := grpcService.LivenessCheck(ctx, req) + if err != nil { + t.Fatalf("%v", err) + } + + if !resp.Alive { + t.Fatalf("expected content to see %v, saw %v", true, resp.Alive) + } +} + +func Test_GRPCService_ReadinessCheck(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, 
"../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) + + ctx := context.Background() + req := &empty.Empty{} + + resp, err := grpcService.ReadinessCheck(ctx, req) + if err != nil { + t.Fatalf("%v", err) + } + + if !resp.Ready { + t.Fatalf("expected content to see %v, saw %v", true, resp.Ready) + } +} + +func Test_GRPCService_Join(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, 
certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) + + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + ctx := context.Background() + req := &protobuf.JoinRequest{ + Id: "node1", + Node: &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, + } + + _, err = grpcService.Join(ctx, req) + if err != nil { + t.Fatalf("%v", err) + } +} + +func Test_GRPCService_Node(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + // Raft server + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := raftServer.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + if err := raftServer.Start(); err != nil { + t.Fatalf("%v", err) + } + + // gRPC service + certificateFile := "" + commonName := "" + + grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := grpcService.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := grpcService.Start(); err != nil { + t.Fatalf("%v", err) + } + + time.Sleep(3 * time.Second) + + grpcAddress := fmt.Sprintf(":%d", 
util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + ctx := context.Background() + req := &protobuf.JoinRequest{ + Id: "node1", + Node: &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, + } + + _, err = grpcService.Join(ctx, req) + if err != nil { + t.Fatalf("%v", err) + } + + resp, err := grpcService.Node(ctx, &empty.Empty{}) + if err != nil { + t.Fatalf("%v", err) + } + + if raftAddress != resp.Node.RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, resp.Node.RaftAddress) + } + + if grpcAddress != resp.Node.Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.GrpcAddress) + } + + if httpAddress != resp.Node.Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.HttpAddress) + } + + if raft.Leader.String() != resp.Node.State { + t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) + } +} + +//func Test_GRPCService_Leave(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// certificateFile := "" +// commonName := "" +// +// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// +// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// +// raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// +// dir1 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir1) +// }() +// dir2 
:= util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir2) +// }() +// dir3 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir3) +// }() +// +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping, false, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping, false, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// grpcService1, err := NewGRPCService(raftServer1, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// grpcService2, err := NewGRPCService(raftServer2, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService2.Start(); err != nil { +// t.Fatalf("%v", 
err) +// } +// +// grpcService3, err := NewGRPCService(raftServer3, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req1 := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress1, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress1, +// HttpAddress: httpAddress1, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// req2 := &protobuf.JoinRequest{ +// Id: "node2", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress2, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress2, +// HttpAddress: httpAddress2, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// req3 := &protobuf.JoinRequest{ +// Id: "node3", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress3, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress3, +// HttpAddress: httpAddress3, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// resp, err := grpcService1.Cluster(ctx, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if "node1" != resp.Cluster.Leader { +// t.Fatalf("expected content to see %v, saw %v", "node1", resp.Cluster.Leader) +// } +// +// //if raftAddress1 != resp..RaftAddress { +// // t.Fatalf("expected content to see %v, saw %v", raftAddress1, resp.Node.RaftAddress) +// //} +// // +// //if grpcAddress1 != resp.Node.Metadata.GrpcAddress { +// // t.Fatalf("expected content to see %v, saw %v", grpcAddress1, resp.Node.Metadata.GrpcAddress) +// //} +// // +// //if httpAddress1 != resp.Node.Metadata.HttpAddress { 
+// // t.Fatalf("expected content to see %v, saw %v", grpcAddress1, resp.Node.Metadata.HttpAddress) +// //} +// // +// //if raft.Leader.String() != resp.Node.State { +// // t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) +// //} +//} + +//func Test_GRPCService_Cluster(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// // Raft server +// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// dir1 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir1) +// }() +// indexMapping1, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger1 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping1, true, logger1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile1 := "" +// commonName1 := "" +// grpcService1, err := NewGRPCService(raftServer1, certificateFile1, commonName1, logger1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService1.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx1 := context.Background() +// joinReq1 := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress1, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress1, +// HttpAddress: 
httpAddress1, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx1, joinReq1) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// dir2 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir2) +// }() +// indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping2, false, logger2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile2 := "" +// commonName2 := "" +// grpcService2, err := NewGRPCService(raftServer2, certificateFile2, commonName2, logger2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx2 := context.Background() +// joinReq2 := &protobuf.JoinRequest{ +// Id: "node2", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress2, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress2, +// HttpAddress: httpAddress2, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx2, joinReq2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// // Raft server +// raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// dir3 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir3) +// }() +// indexMapping3, err := 
mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) +// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping3, false, logger3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile3 := "" +// commonName3 := "" +// grpcService3, err := NewGRPCService(raftServer3, certificateFile3, commonName3, logger3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// time.Sleep(3 * time.Second) +// +// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// +// ctx3 := context.Background() +// joinReq3 := &protobuf.JoinRequest{ +// Id: "node3", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress3, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress3, +// HttpAddress: httpAddress3, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx3, joinReq3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// respCluster1, err := grpcService1.Cluster(ctx1, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != len(respCluster1.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster1.Cluster.Nodes)) +// } +// +// respCluster2, err := grpcService2.Cluster(ctx2, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != len(respCluster2.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster2.Cluster.Nodes)) +// } +// +// respCluster3, err := 
grpcService2.Cluster(ctx3, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// if 3 != len(respCluster3.Cluster.Nodes) { +// t.Fatalf("expected content to see %v, saw %v", 3, len(respCluster3.Cluster.Nodes)) +// } +//} From c39b19a330a55d7b9333ebeaeccd7169931d5fee Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Thu, 11 Jun 2020 22:43:06 +0900 Subject: [PATCH 56/76] Update CHANGES.md --- CHANGES.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 0294f31..676ea3e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## [Unreleased] + +- Add test #129 + ## [v0.9.0] - Implement CORS #128 From 5b31a16272ec939b7a08ccc8a4488ac62541de62 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Thu, 11 Jun 2020 23:05:22 +0900 Subject: [PATCH 57/76] Upgrade Bleve to v1.0.9 (#130) --- go.mod | 15 +------------ go.sum | 70 +++++++++++++++++++++++++++++++++------------------------- 2 files changed, 41 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 0a3a8dd..badce95 100644 --- a/go.mod +++ b/go.mod @@ -3,23 +3,13 @@ module github.com/mosuka/blast go 1.14 require ( - github.com/RoaringBitmap/roaring v0.4.17 // indirect github.com/bbva/raft-badger v1.0.0 - github.com/blevesearch/bleve v0.8.0 - github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 // indirect + github.com/blevesearch/bleve v1.0.9 github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 // indirect - github.com/blevesearch/go-porterstemmer v1.0.2 // indirect - github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect - github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd // indirect - github.com/couchbase/ghistogram v0.1.0 // indirect - github.com/couchbase/moss 
v0.0.0-20190322010551-a0cae174c498 // indirect - github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect github.com/dgraph-io/badger/v2 v2.0.0 - github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/etcd-io/bbolt v1.3.3 // indirect github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect @@ -40,11 +30,8 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect github.com/spf13/cobra v0.0.7 github.com/spf13/viper v1.4.0 - github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect - github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tebeka/snowball v0.4.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect - go.etcd.io/bbolt v1.3.3 // indirect go.uber.org/zap v1.14.1 google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c google.golang.org/grpc v1.28.0 diff --git a/go.sum b/go.sum index c714a35..3e13b3c 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= -github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= +github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= 
+github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -24,18 +24,28 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blevesearch/bleve v0.8.0 h1:DCoCrxscCXrlzVWK92k7Vq4d28lTAFuigVmcgIX0VCo= -github.com/blevesearch/bleve v0.8.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= +github.com/blevesearch/bleve v1.0.9 h1:kqw/Ank/61UV9/Bx9kCcnfH6qWPgmS8O5LNfpsgzASg= +github.com/blevesearch/bleve v1.0.9/go.mod h1:tb04/rbU29clbtNgorgFd8XdJea4x3ybYaOjWKr+UBU= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 h1:/4ikScMMYMqsRFWJjCyzd3CNWB0lxvqDkqa5nEv6NMc= github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= -github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I= -github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA= -github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg= -github.com/blevesearch/segment 
v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8= -github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd h1:YVyOs9yxpxqcB93Ul/UbdGTh26TrTafZrLdCqbJ4IXs= -github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= +github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= +github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= +github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= +github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= +github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= +github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= +github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= +github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= +github.com/blevesearch/zap/v11 v11.0.9 h1:wlSrDBeGN1G4M51NQHIXca23ttwUfQpWaK7uhO5lRSo= +github.com/blevesearch/zap/v11 v11.0.9/go.mod h1:47hzinvmY2EvvJruzsSCJpro7so8L1neseaGjrtXHOY= +github.com/blevesearch/zap/v12 v12.0.9 h1:PpatkY+BLVFZf0Ok3/fwgI/I4RU0z5blXFGuQANmqXk= +github.com/blevesearch/zap/v12 v12.0.9/go.mod h1:paQuvxy7yXor+0Mx8p2KNmJgygQbQNN+W6HRfL5Hvwc= +github.com/blevesearch/zap/v13 v13.0.1 h1:NSCM6uKu77Vn/x9nlPp4pE1o/bftqcOWZEHSyZVpGBQ= +github.com/blevesearch/zap/v13 v13.0.1/go.mod h1:XmyNLMvMf8Z5FjLANXwUeDW3e1+o77TTGUWrth7T9WI= +github.com/blevesearch/zap/v14 v14.0.0 h1:HF8Ysjm13qxB0jTGaKLlatNXmJbQD8bY+PrPxm5v4hE= +github.com/blevesearch/zap/v14 v14.0.0/go.mod h1:sUc/gPGJlFbSQ2ZUh/wGRYwkKx+Dg/5p+dd+eq6QMXk= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -55,10 +65,10 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpmYDxGobTY/jTuF5zHLw0ID75yzuM= -github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= -github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= -github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= +github.com/couchbase/moss v0.1.0 h1:HCL+xxHUwmOaL44kMM/gU08OW6QGCui1WVFO58bjhNI= +github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= +github.com/couchbase/vellum v1.0.1 h1:qrj9ohvZedvc51S5KzPfJ6P6z0Vqzv7Lx7k3mVc2WOk= +github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= @@ -85,13 +95,9 @@ github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUn github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go 
v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -103,8 +109,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -136,8 +142,8 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -185,6 +191,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ 
-208,6 +215,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= @@ -257,6 +266,7 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -270,10 +280,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= -github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -294,8 +300,8 @@ github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= -github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= +github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM= +github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -320,8 +326,8 @@ github.com/willf/bitset v1.1.10/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -373,6 +379,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -380,8 +387,11 @@ golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7 golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 3b4437d54df864d0c985e7913a085e5cd358955a Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 00:10:09 +0900 Subject: [PATCH 58/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 676ea3e..0a91c43 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] +- Upgrade Bleve to v1.0.9 #130 - Add test #129 ## [v0.9.0] From d2466992774aefa5c52ce91c826a891dd729d360 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:20:09 +0900 Subject: [PATCH 59/76] Update tests (#131) --- cmd/start.go | 2 +- server/grpc_server.go | 6 +- server/grpc_server_test.go | 17 +- server/grpc_service_test.go | 944 +++++++++++++++++++++++------------- 4 files changed, 616 insertions(+), 353 deletions(-) diff --git a/cmd/start.go b/cmd/start.go index 1f796a7..a2b0fcd 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -72,7 +72,7 @@ var ( return err } - grpcServer, err := server.NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) + grpcServer, err := server.NewGRPCServerWithTLS(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) if err != nil { return err } diff --git a/server/grpc_server.go b/server/grpc_server.go index d01f5a3..2320ccb 100644 --- a/server/grpc_server.go +++ b/server/grpc_server.go @@ -29,7 +29,11 @@ type GRPCServer struct { logger *zap.Logger } -func NewGRPCServer(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) { +func NewGRPCServer(grpcAddress string, raftServer *RaftServer, logger *zap.Logger) (*GRPCServer, error) { + return NewGRPCServerWithTLS(grpcAddress, raftServer, "", "", "", logger) +} + +func NewGRPCServerWithTLS(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) { grpcLogger := logger.Named("grpc") opts := []grpc.ServerOption{ diff --git a/server/grpc_server_test.go b/server/grpc_server_test.go index 67596d8..82e4220 100644 --- a/server/grpc_server_test.go +++ b/server/grpc_server_test.go @@ -25,17 +25,21 @@ func Test_GRPCServer_Start_Stop(t *testing.T) { logger := log.NewLogger("WARN", "", 500, 3, 30, false) - // Raft server - rafAddress := 
fmt.Sprintf(":%d", util.TmpPort()) + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + dir := util.TmpDir() defer func() { _ = os.RemoveAll(dir) }() + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) if err != nil { t.Fatalf("%v", err) } - raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + + // Raft server + raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) if err != nil { t.Fatalf("%v", err) } @@ -49,12 +53,7 @@ func Test_GRPCServer_Start_Stop(t *testing.T) { } // gRPC server - grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) - certificateFile := "" - keyFile := "" - commonName := "" - - grpcServer, err := NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) + grpcServer, err := NewGRPCServer(grpcAddress, raftServer, logger) if err != nil { t.Fatalf("%v", err) } diff --git a/server/grpc_service_test.go b/server/grpc_service_test.go index 105841f..e7c89c6 100644 --- a/server/grpc_service_test.go +++ b/server/grpc_service_test.go @@ -1,18 +1,12 @@ package server import ( - "context" "fmt" "os" "path/filepath" "testing" "time" - "github.com/hashicorp/raft" - - "github.com/mosuka/blast/protobuf" - - "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/log" "github.com/mosuka/blast/mapping" "github.com/mosuka/blast/util" @@ -75,320 +69,234 @@ func Test_GRPCService_Start_Stop(t *testing.T) { time.Sleep(3 * time.Second) } -func Test_GRPCService_LivenessCheck(t *testing.T) { - curDir, err := os.Getwd() - if err != nil { - t.Fatalf("%v", err) - } - - tmpDir := util.TmpDir() - defer func() { - _ = os.RemoveAll(tmpDir) - }() - - logger := log.NewLogger("WARN", "", 500, 3, 30, false) - - // Raft server - rafAddress := fmt.Sprintf(":%d", util.TmpPort()) - dir := util.TmpDir() - defer func() { - _ = os.RemoveAll(dir) - }() - indexMapping, err 
:= mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := raftServer.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - if err := raftServer.Start(); err != nil { - t.Fatalf("%v", err) - } - - // gRPC service - certificateFile := "" - commonName := "" - - grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := grpcService.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - - if err := grpcService.Start(); err != nil { - t.Fatalf("%v", err) - } - - time.Sleep(3 * time.Second) - - ctx := context.Background() - req := &empty.Empty{} - - resp, err := grpcService.LivenessCheck(ctx, req) - if err != nil { - t.Fatalf("%v", err) - } - - if !resp.Alive { - t.Fatalf("expected content to see %v, saw %v", true, resp.Alive) - } -} - -func Test_GRPCService_ReadinessCheck(t *testing.T) { - curDir, err := os.Getwd() - if err != nil { - t.Fatalf("%v", err) - } - - tmpDir := util.TmpDir() - defer func() { - _ = os.RemoveAll(tmpDir) - }() - - logger := log.NewLogger("WARN", "", 500, 3, 30, false) - - // Raft server - rafAddress := fmt.Sprintf(":%d", util.TmpPort()) - dir := util.TmpDir() - defer func() { - _ = os.RemoveAll(dir) - }() - indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - raftServer, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := raftServer.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - if err := raftServer.Start(); err != nil { - t.Fatalf("%v", err) - } - - // gRPC service - certificateFile := "" - commonName := 
"" - - grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := grpcService.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - - if err := grpcService.Start(); err != nil { - t.Fatalf("%v", err) - } - - time.Sleep(3 * time.Second) - - ctx := context.Background() - req := &empty.Empty{} - - resp, err := grpcService.ReadinessCheck(ctx, req) - if err != nil { - t.Fatalf("%v", err) - } - - if !resp.Ready { - t.Fatalf("expected content to see %v, saw %v", true, resp.Ready) - } -} - -func Test_GRPCService_Join(t *testing.T) { - curDir, err := os.Getwd() - if err != nil { - t.Fatalf("%v", err) - } - - tmpDir := util.TmpDir() - defer func() { - _ = os.RemoveAll(tmpDir) - }() - - logger := log.NewLogger("WARN", "", 500, 3, 30, false) - - // Raft server - raftAddress := fmt.Sprintf(":%d", util.TmpPort()) - dir := util.TmpDir() - defer func() { - _ = os.RemoveAll(dir) - }() - indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := raftServer.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - if err := raftServer.Start(); err != nil { - t.Fatalf("%v", err) - } - - // gRPC service - certificateFile := "" - commonName := "" - - grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := grpcService.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - - if err := grpcService.Start(); err != nil { - t.Fatalf("%v", err) - } - - time.Sleep(3 * time.Second) - - grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) - httpAddress := fmt.Sprintf(":%d", util.TmpPort()) - - ctx := context.Background() - req := 
&protobuf.JoinRequest{ - Id: "node1", - Node: &protobuf.Node{ - RaftAddress: raftAddress, - Metadata: &protobuf.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, - }, - }, - } - - _, err = grpcService.Join(ctx, req) - if err != nil { - t.Fatalf("%v", err) - } -} - -func Test_GRPCService_Node(t *testing.T) { - curDir, err := os.Getwd() - if err != nil { - t.Fatalf("%v", err) - } - - tmpDir := util.TmpDir() - defer func() { - _ = os.RemoveAll(tmpDir) - }() - - logger := log.NewLogger("WARN", "", 500, 3, 30, false) - - // Raft server - raftAddress := fmt.Sprintf(":%d", util.TmpPort()) - dir := util.TmpDir() - defer func() { - _ = os.RemoveAll(dir) - }() - indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := raftServer.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - if err := raftServer.Start(); err != nil { - t.Fatalf("%v", err) - } - - // gRPC service - certificateFile := "" - commonName := "" - - grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - if err := grpcService.Stop(); err != nil { - t.Fatalf("%v", err) - } - }() - - if err := grpcService.Start(); err != nil { - t.Fatalf("%v", err) - } - - time.Sleep(3 * time.Second) - - grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) - httpAddress := fmt.Sprintf(":%d", util.TmpPort()) - - ctx := context.Background() - req := &protobuf.JoinRequest{ - Id: "node1", - Node: &protobuf.Node{ - RaftAddress: raftAddress, - Metadata: &protobuf.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, - }, - }, - } - - _, err = grpcService.Join(ctx, req) - if err != nil { - t.Fatalf("%v", err) - } - - resp, err := grpcService.Node(ctx, 
&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - - if raftAddress != resp.Node.RaftAddress { - t.Fatalf("expected content to see %v, saw %v", raftAddress, resp.Node.RaftAddress) - } - - if grpcAddress != resp.Node.Metadata.GrpcAddress { - t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.GrpcAddress) - } - - if httpAddress != resp.Node.Metadata.HttpAddress { - t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.HttpAddress) - } +//func Test_GRPCService_LivenessCheck(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// // Raft server +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// 
grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &empty.Empty{} +// +// resp, err := grpcService.LivenessCheck(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if !resp.Alive { +// t.Fatalf("expected content to see %v, saw %v", true, resp.Alive) +// } +//} - if raft.Leader.String() != resp.Node.State { - t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) - } -} +//func Test_GRPCService_ReadinessCheck(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// // Raft server +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := 
mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() 
+// req := &empty.Empty{} +// +// resp, err := grpcService.ReadinessCheck(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if !resp.Ready { +// t.Fatalf("expected content to see %v, saw %v", true, resp.Ready) +// } +//} -//func Test_GRPCService_Leave(t *testing.T) { +//func Test_GRPCService_Join(t *testing.T) { // curDir, err := os.Getwd() // if err != nil { // t.Fatalf("%v", err) @@ -404,73 +312,319 @@ func Test_GRPCService_Node(t *testing.T) { // certificateFile := "" // commonName := "" // -// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) -// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) -// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress := fmt.Sprintf(":%d", util.TmpPort()) // -// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) -// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) -// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } // -// raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) -// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) -// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// // Raft server +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer.Start(); err != nil { +// t.Fatalf("%v", err) +// } // -// dir1 := util.TmpDir() +// // gRPC service +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } // defer func() { -// _ = os.RemoveAll(dir1) 
+// if err := grpcService.Stop(); err != nil { +// t.Fatalf("%v", err) +// } // }() -// dir2 := util.TmpDir() +// if err := grpcService.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) +// if err != nil { +// t.Fatalf("%v", err) +// } // defer func() { -// _ = os.RemoveAll(dir2) +// grpcServer.Stop() // }() -// dir3 := util.TmpDir() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress, +// HttpAddress: httpAddress, +// }, +// }, +// } +// +// _, err = grpcService.Join(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +//} + +//func Test_GRPCService_Node(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() // defer func() { -// _ = os.RemoveAll(dir3) +// _ = os.RemoveAll(tmpDir) // }() // +// logger := log.NewLogger("WARN", "", 500, 3, 30, 
false) +// +// raftAddress := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress := fmt.Sprintf(":%d", util.TmpPort()) +// +// dir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir) +// }() // indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) // if err != nil { // t.Fatalf("%v", err) // } // // // Raft server -// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping, true, logger) +// raftServer, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) // if err != nil { // t.Fatalf("%v", err) // } // defer func() { -// if err := raftServer1.Stop(); err != nil { +// if err := raftServer.Stop(); err != nil { // t.Fatalf("%v", err) // } // }() -// if err := raftServer1.Start(); err != nil { +// if err := raftServer.Start(); err != nil { // t.Fatalf("%v", err) // } // -// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping, false, logger) +// // gRPC service +// certificateFile := "" +// commonName := "" +// +// grpcService, err := NewGRPCService(raftServer, certificateFile, commonName, logger) // if err != nil { // t.Fatalf("%v", err) // } // defer func() { -// if err := raftServer2.Stop(); err != nil { +// if err := grpcService.Stop(); err != nil { // t.Fatalf("%v", err) // } // }() -// if err := raftServer2.Start(); err != nil { +// +// if err := grpcService.Start(); err != nil { // t.Fatalf("%v", err) // } // -// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping, false, logger) +// // server +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// 
metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// grpcServer := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer, grpcService) +// listener, err := net.Listen("tcp", grpcAddress) // if err != nil { // t.Fatalf("%v", err) // } // defer func() { -// if err := raftServer3.Stop(); err != nil { +// grpcServer.Stop() +// }() +// go func() { +// if err := grpcServer.Serve(listener); err != nil { // t.Fatalf("%v", err) // } // }() -// if err := raftServer3.Start(); err != nil { +// +// time.Sleep(3 * time.Second) +// +// ctx := context.Background() +// req := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress, +// HttpAddress: httpAddress, +// }, +// }, +// } +// +// _, err = grpcService.Join(ctx, req) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// resp, err := grpcService.Node(ctx, &empty.Empty{}) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// if raftAddress != resp.Node.RaftAddress { +// t.Fatalf("expected content to see %v, saw %v", raftAddress, resp.Node.RaftAddress) +// } +// +// if grpcAddress != resp.Node.Metadata.GrpcAddress { +// t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.GrpcAddress) +// } +// +// if httpAddress != resp.Node.Metadata.HttpAddress { +// t.Fatalf("expected content to see %v, saw %v", grpcAddress, resp.Node.Metadata.HttpAddress) +// } +// +// if raft.Leader.String() != resp.Node.State { +// t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), resp.Node.State) +// } +//} + +//func Test_GRPCService_Leave(t *testing.T) { +// curDir, err := os.Getwd() +// if err != nil { 
+// t.Fatalf("%v", err) +// } +// +// tmpDir := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(tmpDir) +// }() +// +// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// +// certificateFile := "" +// commonName := "" +// +// indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) +// if err != nil { +// t.Fatalf("%v", err) +// } +// +// opts := []grpc.ServerOption{ +// grpc.MaxRecvMsgSize(math.MaxInt64), +// grpc.MaxSendMsgSize(math.MaxInt64), +// grpc.StreamInterceptor( +// grpcmiddleware.ChainStreamServer( +// metric.GrpcMetrics.StreamServerInterceptor(), +// grpczap.StreamServerInterceptor(logger), +// ), +// ), +// grpc.UnaryInterceptor( +// grpcmiddleware.ChainUnaryServer( +// metric.GrpcMetrics.UnaryServerInterceptor(), +// grpczap.UnaryServerInterceptor(logger), +// ), +// ), +// grpc.KeepaliveParams( +// keepalive.ServerParameters{ +// //MaxConnectionIdle: 0, +// //MaxConnectionAge: 0, +// //MaxConnectionAgeGrace: 0, +// Time: 5 * time.Second, +// Timeout: 5 * time.Second, +// }, +// ), +// } +// +// // Node1 +// raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress1 := fmt.Sprintf(":%d", util.TmpPort()) +// dir1 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir1) +// }() +// +// // Raft server +// raftServer1, err := NewRaftServer("node1", raftAddress1, dir1, indexMapping, true, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer1.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.Start(); err != nil { // t.Fatalf("%v", err) // } // @@ -488,6 +642,52 @@ func Test_GRPCService_Node(t *testing.T) { // t.Fatalf("%v", err) // } // +// // gRPC server +// grpcServer1 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer1, grpcService1) +// listener1, err := net.Listen("tcp", grpcAddress1) +// if err != nil 
{ +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer1.Stop() +// }() +// go func() { +// if err := grpcServer1.Serve(listener1); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer1.WaitForDetectLeader(60 * time.Second); err != nil { +// t.Fatalf("%v", err) +// } +// time.Sleep(3 * time.Second) +// +// // Node2 +// raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) +// dir2 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir2) +// }() +// +// // Raft server +// raftServer2, err := NewRaftServer("node2", raftAddress2, dir2, indexMapping, false, logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer2.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer2.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service // grpcService2, err := NewGRPCService(raftServer2, certificateFile, commonName, logger) // if err != nil { // t.Fatalf("%v", err) @@ -501,6 +701,49 @@ func Test_GRPCService_Node(t *testing.T) { // t.Fatalf("%v", err) // } // +// // gRPC server +// grpcServer2 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer2, grpcService2) +// listener2, err := net.Listen("tcp", grpcAddress2) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer2.Stop() +// }() +// go func() { +// if err := grpcServer2.Serve(listener2); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// time.Sleep(3 * time.Second) +// +// // Node3 +// raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) +// dir3 := util.TmpDir() +// defer func() { +// _ = os.RemoveAll(dir3) +// }() +// +// // Raft server +// raftServer3, err := NewRaftServer("node3", raftAddress3, dir3, indexMapping, false, 
logger) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// if err := raftServer3.Stop(); err != nil { +// t.Fatalf("%v", err) +// } +// }() +// if err := raftServer3.Start(); err != nil { +// t.Fatalf("%v", err) +// } +// +// // gRPC service // grpcService3, err := NewGRPCService(raftServer3, certificateFile, commonName, logger) // if err != nil { // t.Fatalf("%v", err) @@ -514,6 +757,23 @@ func Test_GRPCService_Node(t *testing.T) { // t.Fatalf("%v", err) // } // +// // gRPC server +// grpcServer3 := grpc.NewServer( +// opts..., +// ) +// protobuf.RegisterIndexServer(grpcServer3, grpcService3) +// listener3, err := net.Listen("tcp", grpcAddress3) +// if err != nil { +// t.Fatalf("%v", err) +// } +// defer func() { +// grpcServer3.Stop() +// }() +// go func() { +// if err := grpcServer3.Serve(listener3); err != nil { +// t.Fatalf("%v", err) +// } +// }() // time.Sleep(3 * time.Second) // // ctx := context.Background() From 4e675c286205579ff7003e59dda66544e9e3935b Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:29:03 +0900 Subject: [PATCH 60/76] Update raft (#132) --- go.mod | 4 ++-- go.sum | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index badce95..526617e 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,13 @@ module github.com/mosuka/blast go 1.14 require ( - github.com/bbva/raft-badger v1.0.0 + github.com/bbva/raft-badger v1.0.1 github.com/blevesearch/bleve v1.0.9 github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 // indirect github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect - github.com/dgraph-io/badger/v2 v2.0.0 + github.com/dgraph-io/badger/v2 v2.0.3 github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect 
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect diff --git a/go.sum b/go.sum index 3e13b3c..653cfd7 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/bbva/raft-badger v1.0.0 h1:N8C2rELUxfrVZhtyCBja/ymhv8cvPhVB+3ab2ob9mkk= -github.com/bbva/raft-badger v1.0.0/go.mod h1:yQjfHBXGV55aXOoEAuNGNlIIGvGNbSG85gOLhfo0pDM= +github.com/bbva/raft-badger v1.0.1 h1:CytsAQ3KbyX/I73Sp+shryUUVL7eElWpfsNmV/6vDcM= +github.com/bbva/raft-badger v1.0.1/go.mod h1:g7ufi3iTshR7TjNy5GyTPGzNS2/gKl2wK27d5QYRsZw= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -83,10 +83,10 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v2 v2.0.0 h1:Cr05o2TUd2IcLbEY0aGd8mbjm1YyQpy+dswo3BcDXrE= -github.com/dgraph-io/badger/v2 v2.0.0/go.mod h1:YoRSIp1LmAJ7zH7tZwRvjNMUYLxB4wl3ebYkaIruZ04= -github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg= -github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod 
h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= +github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI= +github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= From 746a185312888a090c582676fa82143959ee0102 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:29:52 +0900 Subject: [PATCH 61/76] Update CHANGES.md --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 0a91c43..4d3ea37 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] +- Update raft #132 +- Update tests #131 - Upgrade Bleve to v1.0.9 #130 - Add test #129 From 21f9448ee6acf908692203d6019279be5361264d Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:36:42 +0900 Subject: [PATCH 62/76] Update gRPC (#133) --- go.mod | 6 +++--- go.sum | 20 ++++++++++++-------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 526617e..52440b9 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/gorilla/handlers v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.14.3 + github.com/grpc-ecosystem/grpc-gateway v1.14.6 github.com/hashicorp/raft v1.1.2 github.com/ikawaha/kagome.ipadic v1.1.2 // indirect github.com/jmhodges/levigo v1.0.0 // indirect @@ -33,7 +33,7 @@ require ( github.com/tebeka/snowball v0.4.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect go.uber.org/zap v1.14.1 - google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c - google.golang.org/grpc v1.28.0 + google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 + google.golang.org/grpc v1.29.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect ) diff --git a/go.sum b/go.sum index 653cfd7..d8e7bbb 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -13,7 +14,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template 
v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= @@ -96,6 +97,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= @@ -153,8 +155,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= +github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -357,6 +359,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -367,6 +370,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -414,15 +418,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= -google.golang.org/grpc v1.28.0/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From cf429db6f59451500b8923f109f9bf326b8a32c5 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:37:10 +0900 Subject: [PATCH 63/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 4d3ea37..c9edfac 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] +- Update gRPC #133 - Update raft #132 - Update tests #131 - Upgrade Bleve to v1.0.9 #130 From 66d5f891151b0628765b7b82fbfd5cdbb02c721b Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:41:20 +0900 Subject: [PATCH 64/76] Update zap (#134) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 52440b9..4cc4776 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/spf13/viper v1.4.0 github.com/tebeka/snowball v0.4.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect - go.uber.org/zap v1.14.1 + go.uber.org/zap v1.15.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 google.golang.org/grpc v1.29.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index d8e7bbb..dd577f5 100644 --- a/go.sum +++ b/go.sum @@ -339,8 +339,8 @@ go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKY go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= From 9d91ac101d6de1c2bc03eb5db0b962ac062ad8d7 Mon Sep 17 00:00:00 2001 
From: Minoru OSUKA Date: Fri, 12 Jun 2020 22:41:42 +0900 Subject: [PATCH 65/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index c9edfac..1f334ff 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +- Update zap #134 - Update gRPC #133 - Update raft #132 - Update tests #131 From 390032ea4389df09e67d415afceaa33c06a69e7e Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 23:01:14 +0900 Subject: [PATCH 66/76] Update protocol buffers (#135) --- go.mod | 3 +- go.sum | 17 +- protobuf/index.pb.go | 2503 +++++++++++++++++++++++++++--------------- protobuf/index.proto | 2 +- 4 files changed, 1651 insertions(+), 874 deletions(-) diff --git a/go.mod b/go.mod index 4cc4776..1484b6d 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.3.0 // indirect - github.com/golang/protobuf v1.3.5 + github.com/golang/protobuf v1.4.2 github.com/gorilla/handlers v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -35,5 +35,6 @@ require ( go.uber.org/zap v1.15.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 google.golang.org/grpc v1.29.1 + google.golang.org/protobuf v1.23.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect ) diff --git a/go.sum b/go.sum index dd577f5..35defcb 100644 --- a/go.sum +++ b/go.sum @@ -132,13 +132,19 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -427,6 +433,13 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/protobuf/index.pb.go b/protobuf/index.pb.go index cddc642..b3197d3 100644 --- a/protobuf/index.pb.go +++ b/protobuf/index.pb.go @@ -1,32 +1,36 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.11.4 // source: protobuf/index.proto package protobuf import ( context "context" - fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type Event_Type int32 @@ -40,1237 +44,1996 @@ const ( Event_BulkDelete Event_Type = 6 ) -var Event_Type_name = map[int32]string{ - 0: "Unknown", - 1: "Join", - 2: "Leave", - 3: "Set", - 4: "Delete", - 5: "BulkIndex", - 6: "BulkDelete", -} +// Enum value maps for Event_Type. 
+var ( + Event_Type_name = map[int32]string{ + 0: "Unknown", + 1: "Join", + 2: "Leave", + 3: "Set", + 4: "Delete", + 5: "BulkIndex", + 6: "BulkDelete", + } + Event_Type_value = map[string]int32{ + "Unknown": 0, + "Join": 1, + "Leave": 2, + "Set": 3, + "Delete": 4, + "BulkIndex": 5, + "BulkDelete": 6, + } +) -var Event_Type_value = map[string]int32{ - "Unknown": 0, - "Join": 1, - "Leave": 2, - "Set": 3, - "Delete": 4, - "BulkIndex": 5, - "BulkDelete": 6, +func (x Event_Type) Enum() *Event_Type { + p := new(Event_Type) + *p = x + return p } func (x Event_Type) String() string { - return proto.EnumName(Event_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (Event_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{23, 0} +func (Event_Type) Descriptor() protoreflect.EnumDescriptor { + return file_protobuf_index_proto_enumTypes[0].Descriptor() } -type LivenessCheckResponse struct { - Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (Event_Type) Type() protoreflect.EnumType { + return &file_protobuf_index_proto_enumTypes[0] } -func (m *LivenessCheckResponse) Reset() { *m = LivenessCheckResponse{} } -func (m *LivenessCheckResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessCheckResponse) ProtoMessage() {} -func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{0} +func (x Event_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (m *LivenessCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessCheckResponse.Unmarshal(m, b) +// Deprecated: Use Event_Type.Descriptor instead. 
+func (Event_Type) EnumDescriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{23, 0} } -func (m *LivenessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessCheckResponse.Marshal(b, m, deterministic) + +type LivenessCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` } -func (m *LivenessCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessCheckResponse.Merge(m, src) + +func (x *LivenessCheckResponse) Reset() { + *x = LivenessCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LivenessCheckResponse) XXX_Size() int { - return xxx_messageInfo_LivenessCheckResponse.Size(m) + +func (x *LivenessCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LivenessCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessCheckResponse.DiscardUnknown(m) + +func (*LivenessCheckResponse) ProtoMessage() {} + +func (x *LivenessCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_LivenessCheckResponse proto.InternalMessageInfo +// Deprecated: Use LivenessCheckResponse.ProtoReflect.Descriptor instead. 
+func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{0} +} -func (m *LivenessCheckResponse) GetAlive() bool { - if m != nil { - return m.Alive +func (x *LivenessCheckResponse) GetAlive() bool { + if x != nil { + return x.Alive } return false } type ReadinessCheckResponse struct { - Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ReadinessCheckResponse) Reset() { *m = ReadinessCheckResponse{} } -func (m *ReadinessCheckResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessCheckResponse) ProtoMessage() {} -func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{1} + Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` } -func (m *ReadinessCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessCheckResponse.Unmarshal(m, b) -} -func (m *ReadinessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessCheckResponse.Marshal(b, m, deterministic) -} -func (m *ReadinessCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadinessCheckResponse.Merge(m, src) +func (x *ReadinessCheckResponse) Reset() { + *x = ReadinessCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ReadinessCheckResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessCheckResponse.Size(m) + +func (x *ReadinessCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReadinessCheckResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_ReadinessCheckResponse.DiscardUnknown(m) + +func (*ReadinessCheckResponse) ProtoMessage() {} + +func (x *ReadinessCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ReadinessCheckResponse proto.InternalMessageInfo +// Deprecated: Use ReadinessCheckResponse.ProtoReflect.Descriptor instead. +func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{1} +} -func (m *ReadinessCheckResponse) GetReady() bool { - if m != nil { - return m.Ready +func (x *ReadinessCheckResponse) GetReady() bool { + if x != nil { + return x.Ready } return false } type Metadata struct { - GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{2} + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` } -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Metadata proto.InternalMessageInfo +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{2} +} -func (m *Metadata) GetGrpcAddress() string { - if m != nil { - return m.GrpcAddress +func (x *Metadata) GetGrpcAddress() string { + if x != nil { + return x.GrpcAddress } return "" } -func (m *Metadata) GetHttpAddress() string { - if m != nil { - return m.HttpAddress +func (x *Metadata) GetHttpAddress() string { + if x != nil { + return x.HttpAddress } return "" } type Node struct { - RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` - Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{3} + RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` } -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Node proto.InternalMessageInfo +// Deprecated: Use Node.ProtoReflect.Descriptor instead. +func (*Node) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{3} +} -func (m *Node) GetRaftAddress() string { - if m != nil { - return m.RaftAddress +func (x *Node) GetRaftAddress() string { + if x != nil { + return x.RaftAddress } return "" } -func (m *Node) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *Node) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *Node) GetState() string { - if m != nil { - return m.State +func (x *Node) GetState() string { + if x != nil { + return x.State } return "" } type Cluster struct { - Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func 
(*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{4} + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` } -func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) -} -func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) -} -func (m *Cluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cluster.Merge(m, src) +func (x *Cluster) Reset() { + *x = Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) + +func (x *Cluster) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Cluster) XXX_DiscardUnknown() { - xxx_messageInfo_Cluster.DiscardUnknown(m) + +func (*Cluster) ProtoMessage() {} + +func (x *Cluster) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Cluster proto.InternalMessageInfo +// Deprecated: Use Cluster.ProtoReflect.Descriptor instead. 
+func (*Cluster) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{4} +} -func (m *Cluster) GetNodes() map[string]*Node { - if m != nil { - return m.Nodes +func (x *Cluster) GetNodes() map[string]*Node { + if x != nil { + return x.Nodes } return nil } -func (m *Cluster) GetLeader() string { - if m != nil { - return m.Leader +func (x *Cluster) GetLeader() string { + if x != nil { + return x.Leader } return "" } type JoinRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *JoinRequest) Reset() { *m = JoinRequest{} } -func (m *JoinRequest) String() string { return proto.CompactTextString(m) } -func (*JoinRequest) ProtoMessage() {} -func (*JoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{5} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` } -func (m *JoinRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_JoinRequest.Unmarshal(m, b) -} -func (m *JoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_JoinRequest.Marshal(b, m, deterministic) -} -func (m *JoinRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_JoinRequest.Merge(m, src) +func (x *JoinRequest) Reset() { + *x = JoinRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *JoinRequest) XXX_Size() int { - return xxx_messageInfo_JoinRequest.Size(m) + +func (x *JoinRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *JoinRequest) XXX_DiscardUnknown() { - xxx_messageInfo_JoinRequest.DiscardUnknown(m) + +func (*JoinRequest) ProtoMessage() {} + +func (x *JoinRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_JoinRequest proto.InternalMessageInfo +// Deprecated: Use JoinRequest.ProtoReflect.Descriptor instead. +func (*JoinRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{5} +} -func (m *JoinRequest) GetId() string { - if m != nil { - return m.Id +func (x *JoinRequest) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *JoinRequest) GetNode() *Node { - if m != nil { - return m.Node +func (x *JoinRequest) GetNode() *Node { + if x != nil { + return x.Node } return nil } type LeaveRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } -func (m *LeaveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaveRequest) ProtoMessage() {} -func (*LeaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{6} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *LeaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LeaveRequest.Unmarshal(m, b) -} -func (m *LeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LeaveRequest.Marshal(b, m, deterministic) -} -func (m *LeaveRequest) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaveRequest.Merge(m, src) +func (x *LeaveRequest) Reset() { + *x = LeaveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LeaveRequest) XXX_Size() int { - return xxx_messageInfo_LeaveRequest.Size(m) + +func (x *LeaveRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LeaveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaveRequest.DiscardUnknown(m) + +func (*LeaveRequest) ProtoMessage() {} + +func (x *LeaveRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_LeaveRequest proto.InternalMessageInfo +// Deprecated: Use LeaveRequest.ProtoReflect.Descriptor instead. 
+func (*LeaveRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{6} +} -func (m *LeaveRequest) GetId() string { - if m != nil { - return m.Id +func (x *LeaveRequest) GetId() string { + if x != nil { + return x.Id } return "" } type NodeResponse struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *NodeResponse) Reset() { *m = NodeResponse{} } -func (m *NodeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeResponse) ProtoMessage() {} -func (*NodeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{7} + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` } -func (m *NodeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeResponse.Unmarshal(m, b) -} -func (m *NodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeResponse.Marshal(b, m, deterministic) -} -func (m *NodeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeResponse.Merge(m, src) +func (x *NodeResponse) Reset() { + *x = NodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeResponse) XXX_Size() int { - return xxx_messageInfo_NodeResponse.Size(m) + +func (x *NodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeResponse.DiscardUnknown(m) + +func (*NodeResponse) ProtoMessage() {} + +func (x *NodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeResponse proto.InternalMessageInfo +// Deprecated: Use NodeResponse.ProtoReflect.Descriptor instead. +func (*NodeResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{7} +} -func (m *NodeResponse) GetNode() *Node { - if m != nil { - return m.Node +func (x *NodeResponse) GetNode() *Node { + if x != nil { + return x.Node } return nil } type ClusterResponse struct { - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ClusterResponse) Reset() { *m = ClusterResponse{} } -func (m *ClusterResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterResponse) ProtoMessage() {} -func (*ClusterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{8} + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (m *ClusterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterResponse.Unmarshal(m, b) -} -func (m *ClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterResponse.Marshal(b, m, deterministic) -} -func (m *ClusterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterResponse.Merge(m, src) +func (x *ClusterResponse) Reset() { + *x = ClusterResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ClusterResponse) XXX_Size() int { - return 
xxx_messageInfo_ClusterResponse.Size(m) + +func (x *ClusterResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ClusterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterResponse.DiscardUnknown(m) + +func (*ClusterResponse) ProtoMessage() {} + +func (x *ClusterResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ClusterResponse proto.InternalMessageInfo +// Deprecated: Use ClusterResponse.ProtoReflect.Descriptor instead. +func (*ClusterResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{8} +} -func (m *ClusterResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster +func (x *ClusterResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster } return nil } type Document struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{9} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` } -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Document proto.InternalMessageInfo +// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
+func (*Document) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{9} +} -func (m *Document) GetId() string { - if m != nil { - return m.Id +func (x *Document) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *Document) GetFields() []byte { - if m != nil { - return m.Fields +func (x *Document) GetFields() []byte { + if x != nil { + return x.Fields } return nil } type GetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{10} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() 
protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetRequest proto.InternalMessageInfo +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. +func (*GetRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{10} +} -func (m *GetRequest) GetId() string { - if m != nil { - return m.Id +func (x *GetRequest) GetId() string { + if x != nil { + return x.Id } return "" } type GetResponse struct { - Fields []byte `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{11} + Fields []byte `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` } -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetResponse) XXX_Size() int { - return 
xxx_messageInfo_GetResponse.Size(m) + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetResponse proto.InternalMessageInfo +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. +func (*GetResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{11} +} -func (m *GetResponse) GetFields() []byte { - if m != nil { - return m.Fields +func (x *GetResponse) GetFields() []byte { + if x != nil { + return x.Fields } return nil } type SetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SetRequest) Reset() { *m = SetRequest{} } -func (m *SetRequest) String() string { return proto.CompactTextString(m) } -func (*SetRequest) ProtoMessage() {} -func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{12} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` } -func (m *SetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetRequest.Unmarshal(m, b) -} -func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { - return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) -} -func (m *SetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetRequest.Merge(m, src) +func (x *SetRequest) Reset() { + *x = SetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SetRequest) XXX_Size() int { - return xxx_messageInfo_SetRequest.Size(m) + +func (x *SetRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetRequest.DiscardUnknown(m) + +func (*SetRequest) ProtoMessage() {} + +func (x *SetRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetRequest proto.InternalMessageInfo +// Deprecated: Use SetRequest.ProtoReflect.Descriptor instead. 
+func (*SetRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{12} +} -func (m *SetRequest) GetId() string { - if m != nil { - return m.Id +func (x *SetRequest) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *SetRequest) GetFields() []byte { - if m != nil { - return m.Fields +func (x *SetRequest) GetFields() []byte { + if x != nil { + return x.Fields } return nil } type DeleteRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{13} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) + +func 
(*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{13} +} -func (m *DeleteRequest) GetId() string { - if m != nil { - return m.Id +func (x *DeleteRequest) GetId() string { + if x != nil { + return x.Id } return "" } type BulkIndexRequest struct { - Requests []*SetRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } -func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } -func (*BulkIndexRequest) ProtoMessage() {} -func (*BulkIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{14} + Requests []*SetRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) -} -func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) -} -func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexRequest.Merge(m, src) +func (x *BulkIndexRequest) Reset() { + *x = BulkIndexRequest{} + if protoimpl.UnsafeEnabled { + mi := 
&file_protobuf_index_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BulkIndexRequest) XXX_Size() int { - return xxx_messageInfo_BulkIndexRequest.Size(m) + +func (x *BulkIndexRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BulkIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) + +func (*BulkIndexRequest) ProtoMessage() {} + +func (x *BulkIndexRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo +// Deprecated: Use BulkIndexRequest.ProtoReflect.Descriptor instead. +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{14} +} -func (m *BulkIndexRequest) GetRequests() []*SetRequest { - if m != nil { - return m.Requests +func (x *BulkIndexRequest) GetRequests() []*SetRequest { + if x != nil { + return x.Requests } return nil } type BulkIndexResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } -func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } -func (*BulkIndexResponse) ProtoMessage() {} -func (*BulkIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{15} + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` } -func (m *BulkIndexResponse) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) -} -func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) -} -func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexResponse.Merge(m, src) +func (x *BulkIndexResponse) Reset() { + *x = BulkIndexResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BulkIndexResponse) XXX_Size() int { - return xxx_messageInfo_BulkIndexResponse.Size(m) + +func (x *BulkIndexResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BulkIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) + +func (*BulkIndexResponse) ProtoMessage() {} + +func (x *BulkIndexResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo +// Deprecated: Use BulkIndexResponse.ProtoReflect.Descriptor instead. 
+func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{15} +} -func (m *BulkIndexResponse) GetCount() int32 { - if m != nil { - return m.Count +func (x *BulkIndexResponse) GetCount() int32 { + if x != nil { + return x.Count } return 0 } type BulkDeleteRequest struct { - Requests []*DeleteRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } -func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteRequest) ProtoMessage() {} -func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{16} + Requests []*DeleteRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) -} -func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) -} -func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteRequest.Merge(m, src) +func (x *BulkDeleteRequest) Reset() { + *x = BulkDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BulkDeleteRequest) XXX_Size() int { - return xxx_messageInfo_BulkDeleteRequest.Size(m) + +func (x *BulkDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BulkDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) + +func 
(*BulkDeleteRequest) ProtoMessage() {} + +func (x *BulkDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo +// Deprecated: Use BulkDeleteRequest.ProtoReflect.Descriptor instead. +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{16} +} -func (m *BulkDeleteRequest) GetRequests() []*DeleteRequest { - if m != nil { - return m.Requests +func (x *BulkDeleteRequest) GetRequests() []*DeleteRequest { + if x != nil { + return x.Requests } return nil } type BulkDeleteResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } -func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteResponse) ProtoMessage() {} -func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{17} + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` } -func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) -} -func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) -} -func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteResponse.Merge(m, src) +func (x *BulkDeleteResponse) Reset() { 
+ *x = BulkDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BulkDeleteResponse) XXX_Size() int { - return xxx_messageInfo_BulkDeleteResponse.Size(m) + +func (x *BulkDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BulkDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) + +func (*BulkDeleteResponse) ProtoMessage() {} + +func (x *BulkDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo +// Deprecated: Use BulkDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{17} +} -func (m *BulkDeleteResponse) GetCount() int32 { - if m != nil { - return m.Count +func (x *BulkDeleteResponse) GetCount() int32 { + if x != nil { + return x.Count } return 0 } type SetMetadataRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SetMetadataRequest) Reset() { *m = SetMetadataRequest{} } -func (m *SetMetadataRequest) String() string { return proto.CompactTextString(m) } -func (*SetMetadataRequest) ProtoMessage() {} -func (*SetMetadataRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{18} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (m *SetMetadataRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetMetadataRequest.Unmarshal(m, b) -} -func (m *SetMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetMetadataRequest.Marshal(b, m, deterministic) -} -func (m *SetMetadataRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetMetadataRequest.Merge(m, src) +func (x *SetMetadataRequest) Reset() { + *x = SetMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SetMetadataRequest) XXX_Size() int { - return xxx_messageInfo_SetMetadataRequest.Size(m) + +func (x *SetMetadataRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *SetMetadataRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetMetadataRequest.DiscardUnknown(m) + +func (*SetMetadataRequest) ProtoMessage() {} + +func (x *SetMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetMetadataRequest proto.InternalMessageInfo +// Deprecated: Use SetMetadataRequest.ProtoReflect.Descriptor instead. +func (*SetMetadataRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{18} +} -func (m *SetMetadataRequest) GetId() string { - if m != nil { - return m.Id +func (x *SetMetadataRequest) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *SetMetadataRequest) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *SetMetadataRequest) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } type DeleteMetadataRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *DeleteMetadataRequest) Reset() { *m = DeleteMetadataRequest{} } -func (m *DeleteMetadataRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteMetadataRequest) ProtoMessage() {} -func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{19} + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *DeleteMetadataRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteMetadataRequest.Unmarshal(m, b) -} 
-func (m *DeleteMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteMetadataRequest.Marshal(b, m, deterministic) -} -func (m *DeleteMetadataRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteMetadataRequest.Merge(m, src) +func (x *DeleteMetadataRequest) Reset() { + *x = DeleteMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteMetadataRequest) XXX_Size() int { - return xxx_messageInfo_DeleteMetadataRequest.Size(m) + +func (x *DeleteMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteMetadataRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteMetadataRequest.DiscardUnknown(m) + +func (*DeleteMetadataRequest) ProtoMessage() {} + +func (x *DeleteMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteMetadataRequest proto.InternalMessageInfo +// Deprecated: Use DeleteMetadataRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{19} +} -func (m *DeleteMetadataRequest) GetId() string { - if m != nil { - return m.Id +func (x *DeleteMetadataRequest) GetId() string { + if x != nil { + return x.Id } return "" } type SearchRequest struct { - SearchRequest []byte `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } -func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{20} + SearchRequest []byte `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` } -func (m *SearchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) +func (x *SearchRequest) Reset() { + *x = SearchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) + +func (x *SearchRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) + +func (*SearchRequest) ProtoMessage() {} 
+ +func (x *SearchRequest) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo +// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead. +func (*SearchRequest) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{20} +} -func (m *SearchRequest) GetSearchRequest() []byte { - if m != nil { - return m.SearchRequest +func (x *SearchRequest) GetSearchRequest() []byte { + if x != nil { + return x.SearchRequest } return nil } type SearchResponse struct { - SearchResult []byte `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{21} + SearchResult []byte `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` } -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) +func (x *SearchResponse) Reset() { + *x = SearchResponse{} + if 
protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) + +func (x *SearchResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SearchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SearchResponse.DiscardUnknown(m) + +func (*SearchResponse) ProtoMessage() {} + +func (x *SearchResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo +// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead. +func (*SearchResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{21} +} -func (m *SearchResponse) GetSearchResult() []byte { - if m != nil { - return m.SearchResult +func (x *SearchResponse) GetSearchResult() []byte { + if x != nil { + return x.SearchResult } return nil } type MappingResponse struct { - Mapping []byte `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *MappingResponse) Reset() { *m = MappingResponse{} } -func (m *MappingResponse) String() string { return proto.CompactTextString(m) } -func (*MappingResponse) ProtoMessage() {} -func (*MappingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{22} + Mapping []byte `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"` } -func (m 
*MappingResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MappingResponse.Unmarshal(m, b) -} -func (m *MappingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MappingResponse.Marshal(b, m, deterministic) -} -func (m *MappingResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MappingResponse.Merge(m, src) +func (x *MappingResponse) Reset() { + *x = MappingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MappingResponse) XXX_Size() int { - return xxx_messageInfo_MappingResponse.Size(m) + +func (x *MappingResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MappingResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MappingResponse.DiscardUnknown(m) + +func (*MappingResponse) ProtoMessage() {} + +func (x *MappingResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MappingResponse proto.InternalMessageInfo +// Deprecated: Use MappingResponse.ProtoReflect.Descriptor instead. 
+func (*MappingResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{22} +} -func (m *MappingResponse) GetMapping() []byte { - if m != nil { - return m.Mapping +func (x *MappingResponse) GetMapping() []byte { + if x != nil { + return x.Mapping } return nil } type Event struct { - Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=index.Event_Type" json:"type,omitempty"` - Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{23} + Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=index.Event_Type" json:"type,omitempty"` + Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Event) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Event.Unmarshal(m, b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Event) XXX_Size() int { - return xxx_messageInfo_Event.Size(m) + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() 
protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Event proto.InternalMessageInfo +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{23} +} -func (m *Event) GetType() Event_Type { - if m != nil { - return m.Type +func (x *Event) GetType() Event_Type { + if x != nil { + return x.Type } return Event_Unknown } -func (m *Event) GetData() *any.Any { - if m != nil { - return m.Data +func (x *Event) GetData() *any.Any { + if x != nil { + return x.Data } return nil } type WatchResponse struct { - Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{24} + Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` } -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchResponse.Unmarshal(m, b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) +func (x *WatchResponse) Reset() { + *x = WatchResponse{} + if protoimpl.UnsafeEnabled { + mi := 
&file_protobuf_index_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *WatchResponse) XXX_Size() int { - return xxx_messageInfo_WatchResponse.Size(m) + +func (x *WatchResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) + +func (*WatchResponse) ProtoMessage() {} + +func (x *WatchResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo +// Deprecated: Use WatchResponse.ProtoReflect.Descriptor instead. +func (*WatchResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{24} +} -func (m *WatchResponse) GetEvent() *Event { - if m != nil { - return m.Event +func (x *WatchResponse) GetEvent() *Event { + if x != nil { + return x.Event } return nil } type MetricsResponse struct { - Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } -func (m *MetricsResponse) String() string { return proto.CompactTextString(m) } -func (*MetricsResponse) ProtoMessage() {} -func (*MetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28043ab4bd817113, []int{25} + Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` } -func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_MetricsResponse.Unmarshal(m, b) -} -func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) -} -func (m *MetricsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsResponse.Merge(m, src) +func (x *MetricsResponse) Reset() { + *x = MetricsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_protobuf_index_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricsResponse) XXX_Size() int { - return xxx_messageInfo_MetricsResponse.Size(m) + +func (x *MetricsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsResponse.DiscardUnknown(m) + +func (*MetricsResponse) ProtoMessage() {} + +func (x *MetricsResponse) ProtoReflect() protoreflect.Message { + mi := &file_protobuf_index_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo +// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
+func (*MetricsResponse) Descriptor() ([]byte, []int) { + return file_protobuf_index_proto_rawDescGZIP(), []int{25} +} -func (m *MetricsResponse) GetMetrics() []byte { - if m != nil { - return m.Metrics +func (x *MetricsResponse) GetMetrics() []byte { + if x != nil { + return x.Metrics } return nil } -func init() { - proto.RegisterEnum("index.Event_Type", Event_Type_name, Event_Type_value) - proto.RegisterType((*LivenessCheckResponse)(nil), "index.LivenessCheckResponse") - proto.RegisterType((*ReadinessCheckResponse)(nil), "index.ReadinessCheckResponse") - proto.RegisterType((*Metadata)(nil), "index.Metadata") - proto.RegisterType((*Node)(nil), "index.Node") - proto.RegisterType((*Cluster)(nil), "index.Cluster") - proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") - proto.RegisterType((*JoinRequest)(nil), "index.JoinRequest") - proto.RegisterType((*LeaveRequest)(nil), "index.LeaveRequest") - proto.RegisterType((*NodeResponse)(nil), "index.NodeResponse") - proto.RegisterType((*ClusterResponse)(nil), "index.ClusterResponse") - proto.RegisterType((*Document)(nil), "index.Document") - proto.RegisterType((*GetRequest)(nil), "index.GetRequest") - proto.RegisterType((*GetResponse)(nil), "index.GetResponse") - proto.RegisterType((*SetRequest)(nil), "index.SetRequest") - proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") - proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") - proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") - proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") - proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") - proto.RegisterType((*SetMetadataRequest)(nil), "index.SetMetadataRequest") - proto.RegisterType((*DeleteMetadataRequest)(nil), "index.DeleteMetadataRequest") - proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") - 
proto.RegisterType((*MappingResponse)(nil), "index.MappingResponse") - proto.RegisterType((*Event)(nil), "index.Event") - proto.RegisterType((*WatchResponse)(nil), "index.WatchResponse") - proto.RegisterType((*MetricsResponse)(nil), "index.MetricsResponse") -} - -func init() { proto.RegisterFile("protobuf/index.proto", fileDescriptor_28043ab4bd817113) } - -var fileDescriptor_28043ab4bd817113 = []byte{ - // 1166 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0xed, 0x6e, 0x1b, 0x45, - 0x17, 0xae, 0xbf, 0xdd, 0xe3, 0x8f, 0xb8, 0xa7, 0x76, 0xea, 0x6c, 0xd3, 0x36, 0x9d, 0x57, 0xd1, - 0x1b, 0x5c, 0x62, 0x97, 0x14, 0x10, 0x04, 0x81, 0x94, 0xb6, 0x56, 0x05, 0xa4, 0x51, 0xd9, 0x50, - 0x81, 0x00, 0x29, 0x9a, 0x78, 0x27, 0xce, 0x2a, 0xeb, 0xdd, 0x65, 0x77, 0xec, 0x62, 0xa1, 0xfe, - 0xe1, 0x16, 0xf8, 0xc5, 0x85, 0xc0, 0x8d, 0x70, 0x0b, 0x5c, 0x08, 0x9a, 0x8f, 0x5d, 0xef, 0xda, - 0xde, 0x46, 0xfc, 0xf2, 0xce, 0x9c, 0x67, 0x9e, 0xf3, 0xcc, 0x99, 0x99, 0xe7, 0xc8, 0xd0, 0xf6, - 0x03, 0x8f, 0x7b, 0xe7, 0xd3, 0x8b, 0x81, 0xed, 0x5a, 0xec, 0x97, 0xbe, 0x1c, 0x62, 0x49, 0x0e, - 0x8c, 0xad, 0xb1, 0xe7, 0x8d, 0x1d, 0x36, 0x88, 0x31, 0xd4, 0x9d, 0x2b, 0x84, 0x71, 0x77, 0x39, - 0xc4, 0x26, 0x3e, 0x8f, 0x82, 0xdb, 0x3a, 0x48, 0x7d, 0x7b, 0x40, 0x5d, 0xd7, 0xe3, 0x94, 0xdb, - 0x9e, 0x1b, 0xea, 0xe8, 0xfb, 0xf2, 0x67, 0xb4, 0x3f, 0x66, 0xee, 0x7e, 0xf8, 0x86, 0x8e, 0xc7, - 0x2c, 0x18, 0x78, 0xbe, 0x44, 0xac, 0xa2, 0xc9, 0x3e, 0x74, 0x8e, 0xed, 0x19, 0x73, 0x59, 0x18, - 0x3e, 0xbb, 0x64, 0xa3, 0x2b, 0x93, 0x85, 0xbe, 0xe7, 0x86, 0x0c, 0xdb, 0x50, 0xa2, 0x8e, 0x3d, - 0x63, 0xdd, 0xdc, 0x4e, 0x6e, 0xaf, 0x6a, 0xaa, 0x01, 0xe9, 0xc3, 0xa6, 0xc9, 0xa8, 0x65, 0xaf, - 0xc5, 0x07, 0x8c, 0x5a, 0xf3, 0x08, 0x2f, 0x07, 0xe4, 0x15, 0x54, 0x5f, 0x32, 0x4e, 0x2d, 0xca, - 0x29, 0x3e, 0x84, 0xfa, 0x38, 0xf0, 0x47, 0x67, 0xd4, 0xb2, 0x02, 0x16, 0x86, 0x12, 0x78, 0xd3, - 0xac, 0x89, 0xb9, 0x23, 0x35, 0x25, 0x20, 0x97, 0x9c, 0xfb, 0x31, 0x24, 
0xaf, 0x20, 0x62, 0x4e, - 0x43, 0x88, 0x03, 0xc5, 0x13, 0xcf, 0x62, 0x02, 0x1a, 0xd0, 0x0b, 0xbe, 0xcc, 0x26, 0xe6, 0x22, - 0xb6, 0x47, 0x50, 0x9d, 0xe8, 0xe4, 0x92, 0xa9, 0x76, 0xb0, 0xd1, 0x57, 0xc7, 0x10, 0x69, 0x32, - 0x63, 0x80, 0xd0, 0x1f, 0x72, 0xca, 0x59, 0xb7, 0x20, 0x89, 0xd4, 0x80, 0xfc, 0x91, 0x83, 0xca, - 0x33, 0x67, 0x1a, 0x72, 0x16, 0xe0, 0x00, 0x4a, 0xae, 0x67, 0x31, 0x91, 0xaa, 0xb0, 0x57, 0x3b, - 0xd8, 0xd2, 0x5c, 0x3a, 0xdc, 0x17, 0xaa, 0xc2, 0xa1, 0xcb, 0x83, 0xb9, 0xa9, 0x70, 0xb8, 0x09, - 0x65, 0x87, 0x51, 0x8b, 0x05, 0x7a, 0x1f, 0x7a, 0x64, 0x0c, 0x01, 0x16, 0x60, 0x6c, 0x41, 0xe1, - 0x8a, 0xcd, 0xb5, 0x7e, 0xf1, 0x89, 0x0f, 0xa1, 0x34, 0xa3, 0xce, 0x94, 0x69, 0xd1, 0x35, 0x9d, - 0x48, 0xac, 0x31, 0x55, 0xe4, 0x30, 0xff, 0x49, 0x8e, 0x7c, 0x01, 0xb5, 0xaf, 0x3c, 0xdb, 0x35, - 0xd9, 0xcf, 0x53, 0x16, 0x72, 0x6c, 0x42, 0xde, 0xb6, 0x34, 0x4d, 0xde, 0xb6, 0xf0, 0x01, 0x14, - 0x85, 0x8c, 0x75, 0x24, 0x32, 0x40, 0xee, 0x43, 0xfd, 0x98, 0xd1, 0x19, 0xcb, 0x20, 0x20, 0x03, - 0xa8, 0x4b, 0x74, 0x74, 0xc2, 0x11, 0x61, 0x2e, 0x8b, 0xf0, 0x33, 0xd8, 0xd0, 0xc5, 0x88, 0xd7, - 0xec, 0x41, 0x65, 0xa4, 0xa6, 0xf4, 0xb2, 0x66, 0xba, 0x6a, 0x66, 0x14, 0x26, 0x07, 0x50, 0x7d, - 0xee, 0x8d, 0xa6, 0x13, 0xe6, 0xae, 0x6e, 0x65, 0x13, 0xca, 0x17, 0x36, 0x73, 0x2c, 0x75, 0x21, - 0xea, 0xa6, 0x1e, 0x91, 0x6d, 0x80, 0x17, 0x8c, 0x67, 0xe9, 0xdf, 0x85, 0x9a, 0x8c, 0x6a, 0x29, - 0x0b, 0x92, 0x5c, 0x8a, 0xe4, 0x43, 0x80, 0xd3, 0x4c, 0x92, 0xcc, 0xd4, 0x0f, 0xa0, 0xf1, 0x9c, - 0x39, 0x8c, 0x67, 0x56, 0xef, 0x08, 0x5a, 0x4f, 0xa7, 0xce, 0xd5, 0x97, 0x62, 0xb7, 0x11, 0x66, - 0x1f, 0xaa, 0x81, 0xfa, 0x8c, 0x2e, 0xd1, 0x2d, 0x5d, 0x8e, 0x85, 0x02, 0x33, 0x86, 0x90, 0xf7, - 0xe0, 0x56, 0x82, 0x62, 0xf1, 0xce, 0x46, 0xde, 0xd4, 0xe5, 0x32, 0x55, 0xc9, 0x54, 0x03, 0x32, - 0x54, 0xd0, 0xb4, 0xa4, 0xc7, 0x2b, 0xe9, 0xda, 0x3a, 0x5d, 0x0a, 0x97, 0xc8, 0xd8, 0x03, 0x4c, - 0xd2, 0xbc, 0x33, 0xe5, 0x37, 0x80, 0xa7, 0x8c, 0xc7, 0x2f, 0x29, 0xa3, 0x7e, 0xff, 0xe5, 0x0d, - 0x92, 0xff, 
0x43, 0x47, 0xa5, 0xbe, 0x86, 0x95, 0x7c, 0x0c, 0x8d, 0x53, 0x46, 0x83, 0xd1, 0x65, - 0x04, 0xd8, 0x85, 0x66, 0x28, 0x27, 0xce, 0xf4, 0x5e, 0xf4, 0x21, 0x37, 0xc2, 0x24, 0x8c, 0x7c, - 0x04, 0xcd, 0x68, 0x9d, 0xde, 0xdb, 0xff, 0xa0, 0x11, 0x2f, 0x0c, 0xa7, 0x4e, 0xb4, 0xae, 0x1e, - 0xad, 0x13, 0x73, 0xe4, 0x11, 0x6c, 0xbc, 0xa4, 0xbe, 0x6f, 0xbb, 0xe3, 0x78, 0x5d, 0x17, 0x2a, - 0x13, 0x35, 0xa5, 0x57, 0x44, 0x43, 0xf2, 0x57, 0x0e, 0x4a, 0xc3, 0x99, 0xb8, 0xc6, 0xbb, 0x50, - 0xe4, 0x73, 0x5f, 0x3d, 0x98, 0x66, 0x7c, 0xd4, 0x32, 0xd6, 0xff, 0x76, 0xee, 0x33, 0x53, 0x86, - 0x71, 0x0f, 0x8a, 0x89, 0xf2, 0xb4, 0xfb, 0xca, 0xdd, 0xfb, 0x91, 0xf5, 0xf7, 0x8f, 0xdc, 0xb9, - 0x29, 0x11, 0xe4, 0x27, 0x28, 0x8a, 0x75, 0x58, 0x83, 0xca, 0x6b, 0xf7, 0xca, 0xf5, 0xde, 0xb8, - 0xad, 0x1b, 0x58, 0x85, 0xa2, 0xb0, 0x81, 0x56, 0x0e, 0x6f, 0x42, 0x49, 0x3e, 0xe8, 0x56, 0x1e, - 0x2b, 0x50, 0x38, 0x65, 0xbc, 0x55, 0x40, 0x80, 0xb2, 0x2a, 0x69, 0xab, 0x88, 0x0d, 0xb8, 0x19, - 0xdf, 0xa7, 0x56, 0x09, 0x9b, 0x00, 0x8b, 0xc3, 0x6e, 0x95, 0xc9, 0x13, 0x68, 0x7c, 0x47, 0x79, - 0xa2, 0x36, 0x04, 0x4a, 0x4c, 0x88, 0xd5, 0x4f, 0xb7, 0x9e, 0xdc, 0x80, 0xa9, 0x42, 0xb2, 0x34, - 0x8c, 0x07, 0xf6, 0x28, 0x4c, 0x95, 0x46, 0x4d, 0xc5, 0xa5, 0x51, 0xc3, 0x83, 0x3f, 0x01, 0x4a, - 0x32, 0x3b, 0x52, 0x68, 0xa4, 0xda, 0x0e, 0x6e, 0xae, 0x6c, 0x7b, 0x28, 0x3a, 0x9e, 0xb1, 0xad, - 0x93, 0xae, 0x6d, 0x52, 0xc4, 0xf8, 0xed, 0xef, 0x7f, 0x7e, 0xcf, 0xb7, 0x11, 0x07, 0xb3, 0x0f, - 0x06, 0x8e, 0x86, 0x9c, 0x8d, 0x24, 0xa3, 0x05, 0xcd, 0x74, 0xab, 0xca, 0xcc, 0x71, 0x4f, 0xe7, - 0x58, 0xdf, 0xd9, 0xc8, 0x5d, 0x99, 0xa4, 0x83, 0xb7, 0x45, 0x92, 0x20, 0xc2, 0xe8, 0x2c, 0x43, - 0xdd, 0x8e, 0xb2, 0xb8, 0x6f, 0x27, 0x6d, 0x32, 0x62, 0x6c, 0x49, 0x46, 0xc0, 0xaa, 0x60, 0x14, - 0xd6, 0x89, 0xa6, 0x3a, 0x44, 0x44, 0x0d, 0x4f, 0x18, 0xbb, 0x91, 0x41, 0x4d, 0xee, 0x4b, 0x96, - 0xae, 0xd1, 0x12, 0x2c, 0xda, 0x46, 0x07, 0xbf, 0xda, 0xd6, 0xdb, 0x43, 0x69, 0xc7, 0x78, 0xb2, - 0x68, 0x5d, 0x59, 0xea, 0x36, 0x97, 0xdc, 0x38, 
0x12, 0x78, 0x5b, 0x52, 0x37, 0xb0, 0x96, 0xa0, - 0xc6, 0x13, 0x7d, 0xbd, 0x30, 0xda, 0x53, 0xb2, 0x7b, 0x64, 0xaa, 0xec, 0x4a, 0x2a, 0xec, 0xad, - 0xa8, 0xc4, 0x57, 0x50, 0x3d, 0x75, 0xa9, 0x1f, 0x5e, 0x7a, 0xfc, 0x1d, 0x02, 0xd7, 0xb3, 0xb6, - 0x25, 0x6b, 0x13, 0xeb, 0x82, 0x35, 0x8c, 0x58, 0xbe, 0x4f, 0x5c, 0x70, 0xbc, 0xa3, 0x55, 0x2e, - 0xbb, 0xb0, 0xd1, 0x5d, 0x0d, 0xe8, 0x6d, 0x6b, 0xad, 0x46, 0x43, 0xb0, 0x5a, 0xba, 0x1b, 0x85, - 0x87, 0xb9, 0x1e, 0xfe, 0x98, 0x7c, 0x2b, 0x98, 0x64, 0x48, 0x59, 0xa9, 0xb1, 0xb5, 0x26, 0x92, - 0x26, 0xef, 0xad, 0x92, 0x7f, 0x0d, 0x85, 0x17, 0x8c, 0x63, 0x64, 0x10, 0x8b, 0x96, 0x66, 0x60, - 0x72, 0x4a, 0xf3, 0xdc, 0x93, 0x3c, 0x77, 0xb0, 0x93, 0xe2, 0x11, 0x25, 0xfd, 0xbc, 0xd7, 0x7b, - 0x8b, 0xa6, 0x7c, 0xf9, 0xb8, 0xda, 0x58, 0x32, 0x6b, 0xb9, 0x23, 0x09, 0x0d, 0x63, 0x3d, 0xa1, - 0x10, 0xf8, 0x3a, 0x32, 0x11, 0x5c, 0xdb, 0x40, 0x32, 0x99, 0xb5, 0xd4, 0x5e, 0x86, 0xd4, 0x63, - 0x28, 0x2b, 0x37, 0x8e, 0x69, 0x53, 0xa6, 0x6e, 0x74, 0x96, 0x66, 0x75, 0x01, 0x3a, 0x92, 0x75, - 0x83, 0x80, 0x3c, 0x7b, 0x19, 0x13, 0x22, 0x4f, 0xa0, 0xa2, 0x4d, 0xfa, 0xda, 0xeb, 0xbe, 0x64, - 0xe6, 0xe9, 0xeb, 0xae, 0x7d, 0x1c, 0x3f, 0x85, 0x92, 0xb4, 0xc3, 0x4c, 0xb6, 0x48, 0x74, 0xca, - 0x34, 0xc9, 0x8d, 0xc7, 0x39, 0x29, 0x45, 0x59, 0xde, 0xf5, 0x52, 0xd2, 0xe6, 0xb9, 0x24, 0x45, - 0x05, 0x9f, 0x92, 0x1f, 0x76, 0xc6, 0x36, 0xbf, 0x9c, 0x9e, 0xf7, 0x47, 0xde, 0x64, 0x30, 0xf1, - 0xc2, 0xe9, 0x15, 0x1d, 0x9c, 0x3b, 0x34, 0xe4, 0xf1, 0x1f, 0x84, 0xf3, 0xb2, 0xfc, 0x7a, 0xf2, - 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xd3, 0x19, 0x60, 0x72, 0x0c, 0x00, 0x00, +var File_protobuf_index_proto protoreflect.FileDescriptor + +var file_protobuf_index_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x15, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x6c, 0x69, + 0x76, 0x65, 0x22, 0x2e, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x22, 0x50, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x22, 0x6c, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x61, 0x66, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x72, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2f, + 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x45, 0x0a, 0x0a, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, + 0x0a, 0x0b, 0x4a, 0x6f, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, + 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x1e, + 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2f, + 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, + 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, + 0x3b, 0x0a, 0x0f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x32, 0x0a, 0x08, + 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x22, 0x1c, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x1f, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x41, 0x0a, 0x10, + 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2d, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, + 
0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, + 0x29, 0x0a, 0x11, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x45, 0x0a, 0x11, 0x42, 0x75, + 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x30, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x22, 0x2a, 0x0a, 0x12, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x51, 0x0a, + 0x12, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x27, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x0d, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x5f, 0x72, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x22, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x25, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x5c, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x10, 0x01, 0x12, + 0x09, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x65, + 0x74, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x04, 0x12, + 0x0d, 0x0a, 0x09, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x05, 0x12, 0x0e, + 0x0a, 0x0a, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x10, 0x06, 0x22, 0x33, + 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, + 0x22, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x32, 0xb5, 0x0a, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x61, 0x0a, 0x0d, 0x4c, 0x69, + 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4c, 0x69, 0x76, 0x65, + 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, 0x31, 0x2f, 0x6c, + 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x64, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, + 0x52, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, + 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x45, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 
0x79, 0x1a, 0x13, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0a, + 0x12, 0x08, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x4a, 0x6f, + 0x69, 0x6e, 0x12, 0x12, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4a, 0x6f, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x1e, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x1a, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x3a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x4e, + 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x16, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4e, + 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x13, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, + 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x2a, 0x10, 0x2f, 0x76, + 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x50, + 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x58, 0x0a, 0x09, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x17, 0x2e, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, + 0x75, 0x6c, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x1a, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x5b, 0x0a, 0x0a, 0x42, 0x75, + 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x2a, 0x0d, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x4b, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x11, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x12, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, + 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, 0x64, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x52, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x11, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x1a, 0x15, + 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, + 0x64, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x55, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x2a, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x69, 0x64, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x4c, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x14, 0x2e, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x22, 0x0a, + 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x3a, 0x01, 0x2a, 0x12, 0x4e, 0x0a, + 0x07, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x16, 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, + 0x12, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x39, 0x0a, + 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, + 0x2e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4e, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x13, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0d, 0x12, 0x0b, 0x2f, 0x76, 0x31, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x22, 0x5a, 0x20, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x73, 0x75, 0x6b, 0x61, 0x2f, 0x62, 0x6c, + 0x61, 0x73, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_protobuf_index_proto_rawDescOnce sync.Once + file_protobuf_index_proto_rawDescData = file_protobuf_index_proto_rawDesc +) + +func file_protobuf_index_proto_rawDescGZIP() []byte { + file_protobuf_index_proto_rawDescOnce.Do(func() { + file_protobuf_index_proto_rawDescData = protoimpl.X.CompressGZIP(file_protobuf_index_proto_rawDescData) + }) + return file_protobuf_index_proto_rawDescData +} + +var file_protobuf_index_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_protobuf_index_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_protobuf_index_proto_goTypes = []interface{}{ + (Event_Type)(0), // 0: index.Event.Type + (*LivenessCheckResponse)(nil), // 1: index.LivenessCheckResponse + (*ReadinessCheckResponse)(nil), // 2: index.ReadinessCheckResponse + (*Metadata)(nil), // 3: index.Metadata + (*Node)(nil), // 4: index.Node + (*Cluster)(nil), // 5: index.Cluster + (*JoinRequest)(nil), // 6: index.JoinRequest + (*LeaveRequest)(nil), // 7: index.LeaveRequest + 
(*NodeResponse)(nil), // 8: index.NodeResponse + (*ClusterResponse)(nil), // 9: index.ClusterResponse + (*Document)(nil), // 10: index.Document + (*GetRequest)(nil), // 11: index.GetRequest + (*GetResponse)(nil), // 12: index.GetResponse + (*SetRequest)(nil), // 13: index.SetRequest + (*DeleteRequest)(nil), // 14: index.DeleteRequest + (*BulkIndexRequest)(nil), // 15: index.BulkIndexRequest + (*BulkIndexResponse)(nil), // 16: index.BulkIndexResponse + (*BulkDeleteRequest)(nil), // 17: index.BulkDeleteRequest + (*BulkDeleteResponse)(nil), // 18: index.BulkDeleteResponse + (*SetMetadataRequest)(nil), // 19: index.SetMetadataRequest + (*DeleteMetadataRequest)(nil), // 20: index.DeleteMetadataRequest + (*SearchRequest)(nil), // 21: index.SearchRequest + (*SearchResponse)(nil), // 22: index.SearchResponse + (*MappingResponse)(nil), // 23: index.MappingResponse + (*Event)(nil), // 24: index.Event + (*WatchResponse)(nil), // 25: index.WatchResponse + (*MetricsResponse)(nil), // 26: index.MetricsResponse + nil, // 27: index.Cluster.NodesEntry + (*any.Any)(nil), // 28: google.protobuf.Any + (*empty.Empty)(nil), // 29: google.protobuf.Empty +} +var file_protobuf_index_proto_depIdxs = []int32{ + 3, // 0: index.Node.metadata:type_name -> index.Metadata + 27, // 1: index.Cluster.nodes:type_name -> index.Cluster.NodesEntry + 4, // 2: index.JoinRequest.node:type_name -> index.Node + 4, // 3: index.NodeResponse.node:type_name -> index.Node + 5, // 4: index.ClusterResponse.cluster:type_name -> index.Cluster + 13, // 5: index.BulkIndexRequest.requests:type_name -> index.SetRequest + 14, // 6: index.BulkDeleteRequest.requests:type_name -> index.DeleteRequest + 3, // 7: index.SetMetadataRequest.metadata:type_name -> index.Metadata + 0, // 8: index.Event.type:type_name -> index.Event.Type + 28, // 9: index.Event.data:type_name -> google.protobuf.Any + 24, // 10: index.WatchResponse.event:type_name -> index.Event + 4, // 11: index.Cluster.NodesEntry.value:type_name -> index.Node + 29, 
// 12: index.Index.LivenessCheck:input_type -> google.protobuf.Empty + 29, // 13: index.Index.ReadinessCheck:input_type -> google.protobuf.Empty + 29, // 14: index.Index.Node:input_type -> google.protobuf.Empty + 6, // 15: index.Index.Join:input_type -> index.JoinRequest + 29, // 16: index.Index.Cluster:input_type -> google.protobuf.Empty + 7, // 17: index.Index.Leave:input_type -> index.LeaveRequest + 29, // 18: index.Index.Snapshot:input_type -> google.protobuf.Empty + 15, // 19: index.Index.BulkIndex:input_type -> index.BulkIndexRequest + 17, // 20: index.Index.BulkDelete:input_type -> index.BulkDeleteRequest + 11, // 21: index.Index.Get:input_type -> index.GetRequest + 13, // 22: index.Index.Set:input_type -> index.SetRequest + 14, // 23: index.Index.Delete:input_type -> index.DeleteRequest + 21, // 24: index.Index.Search:input_type -> index.SearchRequest + 29, // 25: index.Index.Mapping:input_type -> google.protobuf.Empty + 29, // 26: index.Index.Watch:input_type -> google.protobuf.Empty + 29, // 27: index.Index.Metrics:input_type -> google.protobuf.Empty + 1, // 28: index.Index.LivenessCheck:output_type -> index.LivenessCheckResponse + 2, // 29: index.Index.ReadinessCheck:output_type -> index.ReadinessCheckResponse + 8, // 30: index.Index.Node:output_type -> index.NodeResponse + 29, // 31: index.Index.Join:output_type -> google.protobuf.Empty + 9, // 32: index.Index.Cluster:output_type -> index.ClusterResponse + 29, // 33: index.Index.Leave:output_type -> google.protobuf.Empty + 29, // 34: index.Index.Snapshot:output_type -> google.protobuf.Empty + 16, // 35: index.Index.BulkIndex:output_type -> index.BulkIndexResponse + 18, // 36: index.Index.BulkDelete:output_type -> index.BulkDeleteResponse + 12, // 37: index.Index.Get:output_type -> index.GetResponse + 29, // 38: index.Index.Set:output_type -> google.protobuf.Empty + 29, // 39: index.Index.Delete:output_type -> google.protobuf.Empty + 22, // 40: index.Index.Search:output_type -> index.SearchResponse + 23, 
// 41: index.Index.Mapping:output_type -> index.MappingResponse + 25, // 42: index.Index.Watch:output_type -> index.WatchResponse + 26, // 43: index.Index.Metrics:output_type -> index.MetricsResponse + 28, // [28:44] is the sub-list for method output_type + 12, // [12:28] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_protobuf_index_proto_init() } +func file_protobuf_index_proto_init() { + if File_protobuf_index_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_protobuf_index_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LivenessCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadinessCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[5].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*JoinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_protobuf_index_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkIndexRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkIndexResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchRequest); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MappingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_protobuf_index_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_protobuf_index_proto_rawDesc, + NumEnums: 1, + NumMessages: 27, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_protobuf_index_proto_goTypes, + DependencyIndexes: file_protobuf_index_proto_depIdxs, + EnumInfos: file_protobuf_index_proto_enumTypes, + MessageInfos: file_protobuf_index_proto_msgTypes, + }.Build() + File_protobuf_index_proto = out.File + file_protobuf_index_proto_rawDesc = nil + file_protobuf_index_proto_goTypes = 
nil + file_protobuf_index_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // IndexClient is the client API for Index service. // @@ -1295,10 +2058,10 @@ type IndexClient interface { } type indexClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewIndexClient(cc *grpc.ClientConn) IndexClient { +func NewIndexClient(cc grpc.ClientConnInterface) IndexClient { return &indexClient{cc} } @@ -1493,52 +2256,52 @@ type IndexServer interface { type UnimplementedIndexServer struct { } -func (*UnimplementedIndexServer) LivenessCheck(ctx context.Context, req *empty.Empty) (*LivenessCheckResponse, error) { +func (*UnimplementedIndexServer) LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented") } -func (*UnimplementedIndexServer) ReadinessCheck(ctx context.Context, req *empty.Empty) (*ReadinessCheckResponse, error) { +func (*UnimplementedIndexServer) ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadinessCheck not implemented") } -func (*UnimplementedIndexServer) Node(ctx context.Context, req *empty.Empty) (*NodeResponse, error) { +func (*UnimplementedIndexServer) Node(context.Context, *empty.Empty) (*NodeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Node not implemented") } -func (*UnimplementedIndexServer) Join(ctx context.Context, req *JoinRequest) (*empty.Empty, error) { +func (*UnimplementedIndexServer) Join(context.Context, *JoinRequest) (*empty.Empty, error) { return nil, 
status.Errorf(codes.Unimplemented, "method Join not implemented") } -func (*UnimplementedIndexServer) Cluster(ctx context.Context, req *empty.Empty) (*ClusterResponse, error) { +func (*UnimplementedIndexServer) Cluster(context.Context, *empty.Empty) (*ClusterResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Cluster not implemented") } -func (*UnimplementedIndexServer) Leave(ctx context.Context, req *LeaveRequest) (*empty.Empty, error) { +func (*UnimplementedIndexServer) Leave(context.Context, *LeaveRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") } -func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { +func (*UnimplementedIndexServer) Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") } -func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { +func (*UnimplementedIndexServer) BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") } -func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { +func (*UnimplementedIndexServer) BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") } -func (*UnimplementedIndexServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { +func (*UnimplementedIndexServer) Get(context.Context, *GetRequest) (*GetResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") } -func (*UnimplementedIndexServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { +func (*UnimplementedIndexServer) Set(context.Context, 
*SetRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") } -func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { +func (*UnimplementedIndexServer) Delete(context.Context, *DeleteRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") } -func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { +func (*UnimplementedIndexServer) Search(context.Context, *SearchRequest) (*SearchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") } -func (*UnimplementedIndexServer) Mapping(ctx context.Context, req *empty.Empty) (*MappingResponse, error) { +func (*UnimplementedIndexServer) Mapping(context.Context, *empty.Empty) (*MappingResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Mapping not implemented") } -func (*UnimplementedIndexServer) Watch(req *empty.Empty, srv Index_WatchServer) error { +func (*UnimplementedIndexServer) Watch(*empty.Empty, Index_WatchServer) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } -func (*UnimplementedIndexServer) Metrics(ctx context.Context, req *empty.Empty) (*MetricsResponse, error) { +func (*UnimplementedIndexServer) Metrics(context.Context, *empty.Empty) (*MetricsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") } diff --git a/protobuf/index.proto b/protobuf/index.proto index 9eb168b..9464a1d 100644 --- a/protobuf/index.proto +++ b/protobuf/index.proto @@ -3,7 +3,7 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/api/annotations.proto"; -import "protoc-gen-swagger/options/annotations.proto"; +//import "protoc-gen-swagger/options/annotations.proto"; package index; From 49e2b05784ecaca590bf07e7a32255959a7a6e6f Mon Sep 
17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 12 Jun 2020 23:01:39 +0900 Subject: [PATCH 67/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 1f334ff..401206c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +- Update protocol buffers #135 - Update zap #134 - Update gRPC #133 - Update raft #132 From 7a01badeb2698a55b8703fe8e18b9df91964fcd2 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Tue, 30 Jun 2020 11:46:52 +0900 Subject: [PATCH 68/76] Update tests (#139) --- server/grpc_service_test.go | 71 +++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/server/grpc_service_test.go b/server/grpc_service_test.go index e7c89c6..753128b 100644 --- a/server/grpc_service_test.go +++ b/server/grpc_service_test.go @@ -569,7 +569,7 @@ func Test_GRPCService_Start_Stop(t *testing.T) { // _ = os.RemoveAll(tmpDir) // }() // -// logger := log.NewLogger("WARN", "", 500, 3, 30, false) +// logger := log.NewLogger("DEBUG", "", 500, 3, 30, false) // // certificateFile := "" // commonName := "" @@ -605,6 +605,8 @@ func Test_GRPCService_Start_Stop(t *testing.T) { // ), // } // +// ctx := context.Background() +// // // Node1 // raftAddress1 := fmt.Sprintf(":%d", util.TmpPort()) // grpcAddress1 := fmt.Sprintf(":%d", util.TmpPort()) @@ -662,7 +664,22 @@ func Test_GRPCService_Start_Stop(t *testing.T) { // if err := raftServer1.WaitForDetectLeader(60 * time.Second); err != nil { // t.Fatalf("%v", err) // } -// time.Sleep(3 * time.Second) +// time.Sleep(10 * time.Second) +// +// req1 := &protobuf.JoinRequest{ +// Id: "node1", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress1, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress1, +// HttpAddress: httpAddress1, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req1) +// if err != nil { +// t.Fatalf("%v", err) +// } 
// // // Node2 // raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) @@ -718,7 +735,22 @@ func Test_GRPCService_Start_Stop(t *testing.T) { // t.Fatalf("%v", err) // } // }() -// time.Sleep(3 * time.Second) +// time.Sleep(10 * time.Second) +// +// req2 := &protobuf.JoinRequest{ +// Id: "node2", +// Node: &protobuf.Node{ +// RaftAddress: raftAddress2, +// Metadata: &protobuf.Metadata{ +// GrpcAddress: grpcAddress2, +// HttpAddress: httpAddress2, +// }, +// }, +// } +// _, err = grpcService1.Join(ctx, req2) +// if err != nil { +// t.Fatalf("%v", err) +// } // // // Node3 // raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) @@ -774,38 +806,7 @@ func Test_GRPCService_Start_Stop(t *testing.T) { // t.Fatalf("%v", err) // } // }() -// time.Sleep(3 * time.Second) -// -// ctx := context.Background() -// req1 := &protobuf.JoinRequest{ -// Id: "node1", -// Node: &protobuf.Node{ -// RaftAddress: raftAddress1, -// Metadata: &protobuf.Metadata{ -// GrpcAddress: grpcAddress1, -// HttpAddress: httpAddress1, -// }, -// }, -// } -// _, err = grpcService1.Join(ctx, req1) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// req2 := &protobuf.JoinRequest{ -// Id: "node2", -// Node: &protobuf.Node{ -// RaftAddress: raftAddress2, -// Metadata: &protobuf.Metadata{ -// GrpcAddress: grpcAddress2, -// HttpAddress: httpAddress2, -// }, -// }, -// } -// _, err = grpcService1.Join(ctx, req2) -// if err != nil { -// t.Fatalf("%v", err) -// } +// time.Sleep(10 * time.Second) // // req3 := &protobuf.JoinRequest{ // Id: "node3", From 97eabc0e2a56258f6ca5322a22efb4b491074a78 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Tue, 30 Jun 2020 11:47:15 +0900 Subject: [PATCH 69/76] Update CHANGES.md --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index 401206c..9ac4ca5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] +- Update tests #139 - Update protocol buffers #135 - Update zap #134 - Update gRPC #133 From 72ca78305673357dcf773dee216552a727c01621 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Tue, 30 Jun 2020 13:06:33 +0900 Subject: [PATCH 70/76] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9ac4ca5..9c732b9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [Unreleased] +## [v0.9.1] - Update tests #139 - Update protocol buffers #135 From bfa81067715553dc982360ea584726e3331ba2a8 Mon Sep 17 00:00:00 2001 From: Pablo Castellano Date: Thu, 3 Sep 2020 17:43:01 +0200 Subject: [PATCH 71/76] Fix typo in README.md (#142) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 10c68d1..97fceac 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,7 @@ $ make GOOS=darwin \ build ``` -### Buil flags +### Build flags Refer to the following table for the build flags of the supported Bleve extensions: From e49aff336bc203354ffe5001ebacd504161626a5 Mon Sep 17 00:00:00 2001 From: Minoru OSUKA Date: Fri, 15 Jan 2021 00:48:23 +0900 Subject: [PATCH 72/76] Upgrade Bleve (#145) --- builtin/config_bleve.go | 2 +- go.mod | 15 ++-------- go.sum | 66 +++++++++++++++-------------------------- mapping/mapping.go | 2 +- server/grpc_service.go | 2 +- server/raft_fsm.go | 4 +-- server/raft_server.go | 4 +-- storage/index.go | 36 +++++++++++----------- 8 files changed, 51 insertions(+), 80 deletions(-) diff --git a/builtin/config_bleve.go b/builtin/config_bleve.go index d95e507..e69a7b9 100644 --- a/builtin/config_bleve.go +++ b/builtin/config_bleve.go @@ -1,5 +1,5 @@ package builtin import ( - _ "github.com/blevesearch/bleve/config" + _ 
"github.com/blevesearch/bleve/v2/config" ) diff --git a/go.mod b/go.mod index 1484b6d..cc8f834 100644 --- a/go.mod +++ b/go.mod @@ -4,15 +4,9 @@ go 1.14 require ( github.com/bbva/raft-badger v1.0.1 - github.com/blevesearch/bleve v1.0.9 - github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 // indirect - github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect - github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect - github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect + github.com/blevesearch/bleve/v2 v2.0.0 + github.com/blevesearch/bleve_index_api v1.0.0 github.com/dgraph-io/badger/v2 v2.0.3 - github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.3.0 // indirect github.com/golang/protobuf v1.4.2 github.com/gorilla/handlers v1.4.2 @@ -20,18 +14,13 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.14.6 github.com/hashicorp/raft v1.1.2 - github.com/ikawaha/kagome.ipadic v1.1.2 // indirect - github.com/jmhodges/levigo v1.0.0 // indirect github.com/mash/go-accesslog v1.1.0 github.com/mitchellh/go-homedir v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/prometheus/client_golang v1.5.1 github.com/prometheus/common v0.9.1 - github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect github.com/spf13/cobra v0.0.7 github.com/spf13/viper v1.4.0 - github.com/tebeka/snowball v0.4.1 // indirect - github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect go.uber.org/zap v1.15.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index 35defcb..8dbb0d6 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/DataDog/zstd v1.4.1 
h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= -github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= +github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -25,28 +25,32 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blevesearch/bleve v1.0.9 h1:kqw/Ank/61UV9/Bx9kCcnfH6qWPgmS8O5LNfpsgzASg= -github.com/blevesearch/bleve v1.0.9/go.mod h1:tb04/rbU29clbtNgorgFd8XdJea4x3ybYaOjWKr+UBU= -github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= -github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= -github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 h1:/4ikScMMYMqsRFWJjCyzd3CNWB0lxvqDkqa5nEv6NMc= -github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod 
h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= +github.com/blevesearch/bleve/v2 v2.0.0 h1:ybdeQ1ZjQcaUKxRsduYqCDzBmveXYbCQUCpG+jHxcG8= +github.com/blevesearch/bleve/v2 v2.0.0/go.mod h1:OBP2Pktqik8vEiUlGhuWjYx7KiO4zD542+DHqICwM5w= +github.com/blevesearch/bleve_index_api v1.0.0 h1:Ds3XeuTxjXCkG6pgIwWDRyooJKNIuOKemnN0N0IkhTU= +github.com/blevesearch/bleve_index_api v1.0.0/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4= github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo= github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M= github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= +github.com/blevesearch/scorch_segment_api v1.0.0 h1:BUkCPWDg2gimTEyVDXf85I2buqqt4lh28uaVMiJsIYk= +github.com/blevesearch/scorch_segment_api v1.0.0/go.mod h1:KgRYmlfYC27NeM6cXOHx8LBgq7jn0atpV8mVWoBKBng= github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac= github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ= github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= -github.com/blevesearch/zap/v11 v11.0.9 h1:wlSrDBeGN1G4M51NQHIXca23ttwUfQpWaK7uhO5lRSo= -github.com/blevesearch/zap/v11 v11.0.9/go.mod h1:47hzinvmY2EvvJruzsSCJpro7so8L1neseaGjrtXHOY= -github.com/blevesearch/zap/v12 v12.0.9 h1:PpatkY+BLVFZf0Ok3/fwgI/I4RU0z5blXFGuQANmqXk= -github.com/blevesearch/zap/v12 v12.0.9/go.mod h1:paQuvxy7yXor+0Mx8p2KNmJgygQbQNN+W6HRfL5Hvwc= -github.com/blevesearch/zap/v13 v13.0.1 h1:NSCM6uKu77Vn/x9nlPp4pE1o/bftqcOWZEHSyZVpGBQ= -github.com/blevesearch/zap/v13 v13.0.1/go.mod h1:XmyNLMvMf8Z5FjLANXwUeDW3e1+o77TTGUWrth7T9WI= -github.com/blevesearch/zap/v14 v14.0.0 
h1:HF8Ysjm13qxB0jTGaKLlatNXmJbQD8bY+PrPxm5v4hE= -github.com/blevesearch/zap/v14 v14.0.0/go.mod h1:sUc/gPGJlFbSQ2ZUh/wGRYwkKx+Dg/5p+dd+eq6QMXk= +github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU= +github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q= +github.com/blevesearch/zapx/v11 v11.1.10 h1:8Eo3rXiHsVSP9Sk+4StrrwLrj9vyulhMVPmxTf8ZuDg= +github.com/blevesearch/zapx/v11 v11.1.10/go.mod h1:DTjbcBqrr/Uo82UBilDC8lEew42gN/OcIyiTNFtSijc= +github.com/blevesearch/zapx/v12 v12.1.10 h1:sqR+/0Z4dSTovApRqLA1HnilMtQer7a4UvPrNmPzlTM= +github.com/blevesearch/zapx/v12 v12.1.10/go.mod h1:14NmKnPrnKAIyiEJM566k/Jk+FQpuiflT5d3uaaK3MI= +github.com/blevesearch/zapx/v13 v13.1.10 h1:zCneEVRJDXwtDfSwh+33Dxguliv192vCK283zdGH4Sw= +github.com/blevesearch/zapx/v13 v13.1.10/go.mod h1:YsVY6YGpTEAlJOMjdL7EsdBLvjWd8kPa2gwJDNpqLJo= +github.com/blevesearch/zapx/v14 v14.1.10 h1:nD0vw2jxKogJFfA5WyoS4wNwZlVby3Aq8aW7CZi6YIw= +github.com/blevesearch/zapx/v14 v14.1.10/go.mod h1:hsULl5eJSxs5NEfBsmeT9qrqdCP+/ecpVZKt60M4V64= +github.com/blevesearch/zapx/v15 v15.1.10 h1:kZR3b9jO9l6s2B5UHI+1N1llLzJ4nYikkXQTMrDl1vQ= +github.com/blevesearch/zapx/v15 v15.1.10/go.mod h1:4ypq25bwtSQKzwEF1UERyIhmGTbMT3brY/n4NC5gRnM= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -68,18 +72,12 @@ github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bOD github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.1.0 h1:HCL+xxHUwmOaL44kMM/gU08OW6QGCui1WVFO58bjhNI= github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs= -github.com/couchbase/vellum v1.0.1 
h1:qrj9ohvZedvc51S5KzPfJ6P6z0Vqzv7Lx7k3mVc2WOk= -github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= +github.com/couchbase/vellum v1.0.2 h1:BrbP0NKiyDdndMPec8Jjhy0U47CZ0Lgx3xUC2r9rZqw= +github.com/couchbase/vellum v1.0.2/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -100,12 +98,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= -github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= -github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -184,12 +176,8 @@ github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBA github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ikawaha/kagome.ipadic v1.1.2 h1:pFxZ1PpMpc6ZoBK712YN5cVK0u/ju2DZ+gRIOriJFFs= -github.com/ikawaha/kagome.ipadic v1.1.2/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= -github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -275,8 +263,6 @@ github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLk github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -319,10 +305,6 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tebeka/snowball v0.4.1 h1:erVaJlHNQD465+S9dBGnl/AdDiGU0N8FTRo5QexNgCs= -github.com/tebeka/snowball v0.4.1/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= -github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 h1:FLimlAjzuhq8loeLX7lLhKKeUgpA/4slynlNVB/Qaks= -github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tinylib/msgp v1.1.0 
h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -334,8 +316,8 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= diff --git a/mapping/mapping.go b/mapping/mapping.go index 7bf0d24..862cdf3 100644 --- a/mapping/mapping.go +++ b/mapping/mapping.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "os" - "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/v2/mapping" ) func NewIndexMapping() *mapping.IndexMappingImpl { diff --git a/server/grpc_service.go b/server/grpc_service.go index 2d0843a..4d6f0ec 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/v2" "github.com/golang/protobuf/ptypes/empty" "github.com/hashicorp/raft" "github.com/mosuka/blast/client" diff --git a/server/raft_fsm.go b/server/raft_fsm.go index 2f69a94..d03dcfa 100644 --- 
a/server/raft_fsm.go +++ b/server/raft_fsm.go @@ -7,8 +7,8 @@ import ( "sync" "time" - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/mapping" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" "github.com/mosuka/blast/errors" diff --git a/server/raft_server.go b/server/raft_server.go index ba27747..5232081 100644 --- a/server/raft_server.go +++ b/server/raft_server.go @@ -10,8 +10,8 @@ import ( "time" raftbadgerdb "github.com/bbva/raft-badger" - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/mapping" "github.com/dgraph-io/badger/v2" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/any" diff --git a/storage/index.go b/storage/index.go index da50a0b..26ba05d 100644 --- a/storage/index.go +++ b/storage/index.go @@ -4,10 +4,10 @@ import ( "os" "time" - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/document" - "github.com/blevesearch/bleve/index/scorch" - "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/v2" + "github.com/blevesearch/bleve/v2/index/scorch" + "github.com/blevesearch/bleve/v2/mapping" + bleveindex "github.com/blevesearch/bleve_index_api" _ "github.com/mosuka/blast/builtin" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" @@ -60,49 +60,49 @@ func (i *Index) Close() error { } func (i *Index) Get(id string) (map[string]interface{}, error) { - d, err := i.index.Document(id) + doc, err := i.index.Document(id) if err != nil { i.logger.Error("failed to get document", zap.String("id", id), zap.Error(err)) return nil, err } - if d == nil { + if doc == nil { err := errors.ErrNotFound i.logger.Debug("document does not found", zap.String("id", id), zap.Error(err)) return nil, err } fields := make(map[string]interface{}, 0) - for _, f := range d.Fields { + doc.VisitFields(func(field 
bleveindex.Field) { var v interface{} - switch field := f.(type) { - case *document.TextField: - v = string(field.Value()) - case *document.NumericField: + switch field := field.(type) { + case bleveindex.TextField: + v = field.Text() + case bleveindex.NumericField: n, err := field.Number() if err == nil { v = n } - case *document.DateTimeField: + case bleveindex.DateTimeField: d, err := field.DateTime() if err == nil { v = d.Format(time.RFC3339Nano) } } - existing, existed := fields[f.Name()] + existing, existed := fields[field.Name()] if existed { switch existing := existing.(type) { case []interface{}: - fields[f.Name()] = append(existing, v) + fields[field.Name()] = append(existing, v) case interface{}: arr := make([]interface{}, 2) arr[0] = existing arr[1] = v - fields[f.Name()] = arr + fields[field.Name()] = arr } } else { - fields[f.Name()] = v + fields[field.Name()] = v } - } + }) return fields, nil } @@ -207,7 +207,7 @@ func (i *Index) SnapshotItems() <-chan *protobuf.Document { ch := make(chan *protobuf.Document, 1024) go func() { - idx, _, err := i.index.Advanced() + idx, err := i.index.Advanced() if err != nil { i.logger.Error("failed to get index", zap.Error(err)) return From ea9f87ff5259ae1a8ec76a1bc1c8349aa64481b7 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 15 Jan 2021 01:12:07 +0900 Subject: [PATCH 73/76] Update Dockerfile --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index f3f3173..ee718e5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14.1-stretch +FROM golang:1.15.6-buster ARG VERSION @@ -37,7 +37,7 @@ RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> VERSION="${VERSION}" \ build -FROM debian:stretch-slim +FROM debian:buster-slim MAINTAINER Minoru Osuka "minoru.osuka@gmail.com" From fddf121dcb194bfe72d52329a391dfdc801a0cb5 Mon Sep 17 00:00:00 2001 From: Radu Topala Date: Mon, 18 Jan 2021 16:26:45 +0200 Subject: [PATCH 
74/76] i147 Dockerfile fixes (#148) --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index ee718e5..08f4a1d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,15 +12,15 @@ RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> apt-get upgrade -y && \ apt-get install -y \ git \ - golang \ + # golang \ libicu-dev \ libstemmer-dev \ gcc-4.8 \ g++-4.8 \ build-essential && \ apt-get clean && \ - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ - update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ + #update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ + #update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 && \ update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 && \ go get -u -v github.com/blevesearch/cld2 && \ From 592851dd743f0d12ba30a6e48edc6e9fda5d5687 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 18 Jan 2021 23:32:42 +0900 Subject: [PATCH 75/76] Update CHANGES.md --- CHANGES.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 9c732b9..4c9c85a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). 
+## [Unreleased] +- Dockerfile fixes #148 + +## [v0.10.0] +- Upgrade Bleve #145 +- Fix typo in README.md #142 + ## [v0.9.1] - Update tests #139 From b85d24c327c80014cc7a59e2ae6c7195760bc5e4 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Fri, 10 Dec 2021 22:26:20 +0900 Subject: [PATCH 76/76] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 97fceac..c2e7dc7 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ +# This project has been taken over by [Phalanx](https://github.com/mosuka/phalanx). + +This project has not been maintained for a long time. + + # Blast Blast is a full-text search and indexing server written in [Go](https://golang.org) built on top of [Bleve](http://www.blevesearch.com).