diff --git a/CHANGES.md b/CHANGES.md index da3402e..ce6fb19 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Use protobuf document #98 - Change node state to Node_SHUTDOWN in a error #99 - Fix a bug for waiting to receive an indexer cluster updates from the stream #100 +- Migrate to grpc-gateway #105 ## [v0.7.1] - 2019-07-18 diff --git a/Dockerfile b/Dockerfile index 4b91268..bda65ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -67,7 +67,7 @@ COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 2000 5000 8000 +EXPOSE 2000 5000 6000 8000 ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] CMD [ "blast", "--help" ] diff --git a/Makefile b/Makefile index ecda13d..c77e6f4 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,8 @@ PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort TARGET_PACKAGES = $(shell find . -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) +GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) + ifeq ($(VERSION),) VERSION = latest endif @@ -44,7 +46,10 @@ endif .PHONY: protoc protoc: @echo ">> generating proto3 code" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done +# @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done .PHONY: format format: diff --git a/README.md b/README.md index e25d40d..beda7db 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,6 @@ $ ./compile_libs.sh $ sudo cp *.so /usr/local/lib ``` - ### macOS High Sierra Version 10.13.6 ```bash @@ -123,7 +122,6 @@ $ make \ You can enable all the Bleve extensions supported by Blast as follows: - ### Linux ```bash @@ -134,8 +132,7 @@ $ make \ build ``` - -#### macOS +### macOS ```bash $ make \ @@ -147,7 +144,6 @@ $ make \ build ``` - ### Build flags Please refer to the following table for details of Bleve Extensions: @@ -164,7 +160,6 @@ Please refer to the following table for details of Bleve Extensions: If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Installing dependencies section above. 
- ### Binaries You can see the binary file when build successful like so: @@ -186,7 +181,6 @@ $ make \ You can test with all the Bleve extensions supported by Blast as follows: - ### Linux ```bash @@ -197,8 +191,7 @@ $ make \ test ``` - -#### macOS +### macOS ```bash $ make \ @@ -223,8 +216,7 @@ $ make \ dist ``` - -#### macOS +### macOS ```bash $ make \ @@ -237,7 +229,6 @@ $ make \ ``` - ## Starting Blast in standalone mode ![standalone](https://user-images.githubusercontent.com/970948/59768879-138f5180-92e0-11e9-8b33-c7b1a93e0893.png) @@ -247,6 +238,7 @@ Running a Blast in standalone mode is easy. Start a indexer like so: ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -266,26 +258,28 @@ Please refer to following document for details of index mapping: You can check the node with the following command: ```bash -$ ./bin/blast indexer node info --grpc-address=:5000 +$ ./bin/blast indexer node info --grpc-address=:5000 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, - "metadata": { - "grpc_address": ":5000", - "http_address": ":8000" + "node": { + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "grpc_gateway_address": ":6000", + "http_address": ":8000" + } } } ``` You can now put, get, search and delete the documents via CLI. - ### Indexing a document via CLI For document indexing, execute the following command: @@ -293,209 +287,213 @@ For document indexing, execute the following command: ```bash $ ./bin/blast indexer index --grpc-address=:5000 enwiki_1 ' { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "fields": { + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" + } } -' +' | jq . ``` or ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . ``` You can see the result in JSON format. The result of the above command is: -```bash -1 +```json +{} ``` - ### Getting a document via CLI Getting a document is as following: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . 
``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` - ### Searching documents via CLI Searching documents is as like following: ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json +$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] + "search_result": { + "status": { + "total": 1, + "failed": 0, + "successful": 1 }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } + "request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" ] }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, - "hits": [ - { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, + "fields": [ + "*" + ], + "facets": { + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null + "end": "2010-12-31T23:59:59Z", + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z" }, { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null + "end": "2020-12-31T23:59:59Z", + "name": "2011 - 
2020", + "start": "2011-01-01T00:00:00Z" } ] }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] + "Type count": { + "size": 10, + "field": "_type" } }, + "explain": false, "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" + "-_score", + "_id", + "-timestamp" ], - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } - } - ], - "total_hits": 1, - "max_score": 0.09703538256409851, - "took": 688819, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] + "includeLocations": false }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 + "hits": [ + { + "index": "/tmp/blast/indexer1/index", + "id": "enwiki_1", + "score": 0.09703538256409851, + "locations": { + "text_en": { + "search": [ + { + "pos": 2, + "start": 2, + "end": 8, + "array_positions": null + }, + { + "pos": 20, + "start": 118, + "end": 124, + "array_positions": null + }, + { + "pos": 33, + "start": 195, + "end": 201, + "array_positions": null + }, + { + "pos": 68, + "start": 415, + "end": 421, + "array_positions": null + }, + { + "pos": 73, + "start": 438, + "end": 444, + "array_positions": null + }, + { + "pos": 76, + "start": 458, + "end": 466, + "array_positions": null + } + ] + }, + "title_en": { + "search": [ + { + "pos": 1, + "start": 0, + "end": 6, + "array_positions": null + } + ] + } + }, + "sort": [ + "_score", + "enwiki_1", + " \u0001\u0015\u001f\u0004~80Pp\u0000" + ], + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" } - ] + } + ], + "total_hits": 1, + "max_score": 0.09703538256409851, + "took": 122105, + "facets": { + "Timestamp range": { + "field": "timestamp", + "total": 1, + "missing": 0, + "other": 0, + "date_ranges": [ + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z", + "count": 1 + } + ] + }, + "Type count": { + "field": "_type", + "total": 1, + "missing": 0, + "other": 0, + "terms": [ + { + "term": "enwiki", + "count": 1 + } + ] + } } } } @@ -508,7 +506,6 @@ Please refer to following document for details of search request and result: - https://github.com/blevesearch/bleve/blob/master/search.go#L267 - https://github.com/blevesearch/bleve/blob/master/search.go#L443 - ### Deleting a document via CLI Deleting a document is as following: @@ -519,38 +516,40 @@ $ ./bin/blast indexer delete --grpc-address=:5000 enwiki_1 You can see the result in JSON format. The result of the above command is: -```bash -1 +```json +{} ``` - ### Indexing documents in bulk via CLI Indexing documents in bulk, run the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk +$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk | jq . ``` You can see the result in JSON format. The result of the above command is: -```bash -36 +```json +{ + "count": 36 +} ``` - ### Deleting documents in bulk via CLI Deleting documents in bulk, run the following command: ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt +$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt | jq . ``` You can see the result in JSON format. The result of the above command is: -```bash -4 +```json +{ + "count": 36 +} ``` @@ -558,26 +557,33 @@ You can see the result in JSON format. The result of the above command is: Also you can do above commands via HTTP REST API that listened port 5002. - ### Indexing a document via HTTP REST API Indexing a document via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' +$ curl -X PUT 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "fields": { + "title_en": "Search engine (computing)", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "enwiki" + } } -' +' | jq . ``` or ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json +$ curl -X PUT 'http://127.0.0.1:6000/v1/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json | jq . +``` + +You can see the result in JSON format. The result of the above command is: + +```json +{} ``` ### Getting a document via HTTP REST API @@ -585,43 +591,235 @@ $ curl -X PUT 'http://127.0.0.1:8000/documents' -H 'Content-Type: application/js Getting a document via HTTP is as following: ```bash -$ curl -X GET 'http://127.0.0.1:8000/documents/enwiki_1' +$ curl -X GET 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . ``` +You can see the result in JSON format. The result of the above command is: + +```json +{ + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } +} +``` ### Searching documents via HTTP REST API Searching documents via HTTP is as following: ```bash -$ curl -X POST 'http://127.0.0.1:8000/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json +$ curl -X POST 'http://127.0.0.1:6000/v1/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json | jq . ``` +You can see the result in JSON format. 
The result of the above command is: + +```json +{ + "search_result": { + "status": { + "total": 1, + "failed": 0, + "successful": 1 + }, + "request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" + ] + }, + "fields": [ + "*" + ], + "facets": { + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ + { + "end": "2010-12-31T23:59:59Z", + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z" + }, + { + "end": "2020-12-31T23:59:59Z", + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z" + } + ] + }, + "Type count": { + "size": 10, + "field": "_type" + } + }, + "explain": false, + "sort": [ + "-_score", + "_id", + "-timestamp" + ], + "includeLocations": false + }, + "hits": [ + { + "index": "/tmp/blast/indexer1/index", + "id": "enwiki_1", + "score": 0.09703538256409851, + "locations": { + "text_en": { + "search": [ + { + "pos": 2, + "start": 2, + "end": 8, + "array_positions": null + }, + { + "pos": 20, + "start": 118, + "end": 124, + "array_positions": null + }, + { + "pos": 33, + "start": 195, + "end": 201, + "array_positions": null + }, + { + "pos": 68, + "start": 415, + "end": 421, + "array_positions": null + }, + { + "pos": 73, + "start": 438, + "end": 444, + "array_positions": null + }, + { + "pos": 76, + "start": 458, + "end": 466, + "array_positions": null + } + ] + }, + "title_en": { + "search": [ + { + "pos": 1, + "start": 0, + "end": 6, + "array_positions": null + } + ] + } + }, + "sort": [ + "_score", + "enwiki_1", + " \u0001\u0015\u001f\u0004~80Pp\u0000" + ], + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } + } + ], + "total_hits": 1, + "max_score": 0.09703538256409851, + "took": 323568, + "facets": { + "Timestamp range": { + "field": "timestamp", + "total": 1, + "missing": 0, + "other": 0, + "date_ranges": [ + { + "name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z", + "count": 1 + } + ] + }, + "Type count": { + "field": "_type", + "total": 1, + "missing": 0, + "other": 0, + "terms": [ + { + "term": "enwiki", + "count": 1 + } + ] + } + } + } +} +``` ### Deleting a document via HTTP REST API Deleting a document via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:8000/documents/enwiki_1' +$ curl -X DELETE 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . ``` +You can see the result in JSON format. The result of the above command is: + +```json +{} +``` ### Indexing documents in bulk via HTTP REST API Indexing documents in bulk via HTTP is as following: ```bash -$ curl -X PUT 'http://127.0.0.1:8000/documents?bulk=true' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl +$ curl -X PUT 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl | jq . 
``` +You can see the result in JSON format. The result of the above command is: + +```json +{ + "count": 36 +} +``` ### Deleting documents in bulk via HTTP REST API Deleting documents in bulk via HTTP is as following: ```bash -$ curl -X DELETE 'http://127.0.0.1:8000/documents' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt +$ curl -X DELETE 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt | jq . +``` + +You can see the result in JSON format. The result of the above command is: + +```json +{ + "count": 36 +} ``` @@ -636,6 +834,7 @@ First of all, start a indexer in standalone. ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -652,6 +851,7 @@ Then, start two more indexers. $ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5010 \ + --grpc-gateway-address=:6010 \ --http-address=:8010 \ --node-id=indexer2 \ --node-address=:2010 \ @@ -661,6 +861,7 @@ $ ./bin/blast indexer start \ $ ./bin/blast indexer start \ --peer-grpc-address=:5000 \ --grpc-address=:5020 \ + --grpc-gateway-address=:6020 \ --http-address=:8020 \ --node-id=indexer3 \ --node-address=:2020 \ @@ -673,41 +874,51 @@ _Above example shows each Blast node running on the same host, so each node must This instructs each new node to join an existing node, specifying `--peer-addr=:5001`. Each node recognizes the joining clusters when started. So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the peers in the cluster with the following command: +```bash +$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +``` + +or ```bash -$ ./bin/blast indexer cluster info --grpc-address=:5000 +$ curl -X GET 'http://127.0.0.1:6000/v1/cluster/status' -H 'Content-Type: application/json' | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "nodes": { - "indexer1": { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, - "metadata": { - "grpc_address": ":5000", - "http_address": ":8000" - } - }, - "indexer2": { - "id": "indexer2", - "bind_address": ":2010", - "state": 1, - "metadata": { - "grpc_address": ":5010", - "http_address": ":8010" - } - }, - "indexer3": { - "id": "indexer3", - "bind_address": ":2020", - "state": 1, - "metadata": { - "grpc_address": ":5020", - "http_address": ":8020" + "cluster": { + "nodes": { + "indexer1": { + "id": "indexer1", + "bind_address": ":2000", + "state": 1, + "metadata": { + "grpc_address": ":5000", + "grpc_gateway_address": ":6000", + "http_address": ":8000" + } + }, + "indexer2": { + "id": "indexer2", + "bind_address": ":2010", + "state": 1, + "metadata": { + "grpc_address": ":5010", + "grpc_gateway_address": ":6010", + "http_address": ":8010" + } + }, + "indexer3": { + "id": "indexer3", + "bind_address": ":2020", + "state": 3, + "metadata": { + "grpc_address": ":5020", + "grpc_gateway_address": ":6020", + "http_address": ":8020" + } } } } @@ -719,43 +930,45 @@ Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, da The following command indexes documents to any node in the cluster: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json +$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . 
``` So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 -$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 +$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 | jq . +$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 | jq . ``` You can see the result in JSON format. The result of the above command is: ```json { - "_type": "enwiki", - "contributor": "unknown", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "fields": { + "_type": "enwiki", + "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title_en": "Search engine (computing)" + } } ``` @@ -772,13 +985,14 @@ Blast provides the following type of node for federation: - manager: Manager manage common index mappings to index across multiple indexers. 
It also manages information and status of clusters that participate in the federation. - dispatcher: Dispatcher is responsible for distributed search or indexing of each indexer. In the case of a index request, send document to each cluster based on the document ID. And in the case of a search request, the same query is sent to each cluster, then the search results are merged and returned to the client. -### Bring up the manager cluster. +### Bring up the manager cluster Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. ```bash $ ./bin/blast manager start \ --grpc-address=:5100 \ + --grpc-gateway-address=:6100 \ --http-address=:8100 \ --node-id=manager1 \ --node-address=:2100 \ @@ -791,6 +1005,7 @@ $ ./bin/blast manager start \ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5110 \ + --grpc-gateway-address=:6110 \ --http-address=:8110 \ --node-id=manager2 \ --node-address=:2110 \ @@ -800,6 +1015,7 @@ $ ./bin/blast manager start \ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5120 \ + --grpc-gateway-address=:6120 \ --http-address=:8120 \ --node-id=manager3 \ --node-address=:2120 \ @@ -807,7 +1023,7 @@ $ ./bin/blast manager start \ --raft-storage-type=boltdb ``` -### Bring up the indexer cluster. +### Bring up the indexer cluster Federated mode differs from cluster mode that it specifies the manager in start up to bring up indexer cluster. The following example starts two 3-node clusters. @@ -817,6 +1033,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -827,6 +1044,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5010 \ + --grpc-gateway-address=:6010 \ --http-address=:8010 \ --node-id=indexer2 \ --node-address=:2010 \ @@ -837,6 +1055,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard1 \ --grpc-address=:5020 \ + --grpc-gateway-address=:6020 \ --http-address=:8020 \ --node-id=indexer3 \ --node-address=:2020 \ @@ -847,6 +1066,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5030 \ + --grpc-gateway-address=:6030 \ --http-address=:8030 \ --node-id=indexer4 \ --node-address=:2030 \ @@ -857,6 +1077,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5040 \ + --grpc-gateway-address=:6040 \ --http-address=:8040 \ --node-id=indexer5 \ --node-address=:2040 \ @@ -867,6 +1088,7 @@ $ ./bin/blast indexer start \ --manager-grpc-address=:5100 \ --shard-id=shard2 \ --grpc-address=:5050 \ + --grpc-gateway-address=:6050 \ --http-address=:8050 \ --node-id=indexer6 \ --node-address=:2050 \ @@ -874,7 +1096,7 @@ $ ./bin/blast indexer start \ --raft-storage-type=boltdb ``` -### Start up the dispatcher. +### Start up the dispatcher Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. 
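The ID-based routing described in the dispatcher bullet above (an index request is sent to one cluster chosen from the document ID) can be pictured with a small sketch. This is not Blast's actual implementation, only an illustration of the idea; the FNV hash and the static shard list are assumptions made for the example — the real dispatcher obtains the shard layout from the manager.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// pickShard maps a document ID onto one of the shard names by hashing the ID.
// The static slice used here is purely illustrative; Blast's dispatcher learns
// the actual shard layout from the manager cluster.
func pickShard(docID string, shards []string) string {
	h := fnv.New32a()
	_, _ = h.Write([]byte(docID))
	idx := int(h.Sum32() % uint32(len(shards)))
	return shards[idx]
}

func main() {
	shards := []string{"shard1", "shard2"}
	// The same ID always hashes to the same shard, so any indexer in that
	// shard can later answer a get request for the document.
	fmt.Println(pickShard("enwiki_1", shards))
}
```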
@@ -882,29 +1104,32 @@ Finally, start the dispatcher with a manager that manages the target federation $ ./bin/blast dispatcher start \ --manager-grpc-address=:5100 \ --grpc-address=:5200 \ + --grpc-gateway-address=:6200 \ --http-address=:8200 ``` +### Check the cluster info + ```bash -$ ./bin/blast manager cluster info --grpc-address=:5100 -$ ./bin/blast indexer cluster info --grpc-address=:5000 -$ ./bin/blast indexer cluster info --grpc-address=:5040 +$ ./bin/blast manager cluster info --grpc-address=:5100 | jq . +$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +$ ./bin/blast indexer cluster info --grpc-address=:5030 | jq . +$ ./bin/blast manager get cluster --grpc-address=:5100 --format=json | jq . ``` ```bash -$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk +$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk | jq . ``` ```bash -$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json +$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json | jq . ``` ```bash -$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt +$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt | jq . ``` - ## Blast on Docker ### Building Docker container image on localhost @@ -925,7 +1150,6 @@ $ docker pull mosuka/blast:latest See https://hub.docker.com/r/mosuka/blast/tags/ - ### Pulling Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: @@ -934,7 +1158,6 @@ You can also use the Docker container image already registered in docker.io like $ docker pull mosuka/blast:latest ``` - ### Running Indexer on Docker Running a Blast data node on Docker. Start Blast data node like so: @@ -943,10 +1166,12 @@ Running a Blast data node on Docker. Start Blast data node like so: $ docker run --rm --name blast-indexer1 \ -p 2000:2000 \ -p 5000:5000 \ + -p 6000:6000 \ -p 8000:8000 \ -v $(pwd)/example:/opt/blast/example \ mosuka/blast:latest blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=blast-indexer1 \ --node-address=:2000 \ @@ -968,7 +1193,6 @@ $ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 This section explain how to index Wikipedia dump to Blast. - ### Install wikiextractor ```bash @@ -976,14 +1200,12 @@ $ cd ${HOME} $ git clone git@github.com:attardi/wikiextractor.git ``` - ### Download wikipedia dump ```bash $ curl -o ~/tmp/enwiki-20190101-pages-articles.xml.bz2 https://dumps.wikimedia.org/enwiki/20190101/enwiki-20190101-pages-articles.xml.bz2 ``` - ### Parsing wikipedia dump ```bash @@ -991,12 +1213,12 @@ $ cd wikiextractor $ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles.xml.bz2 ``` - ### Starting Indexer ```bash $ ./bin/blast indexer start \ --grpc-address=:5000 \ + --grpc-gateway-address=:6000 \ --http-address=:8000 \ --node-id=indexer1 \ --node-address=:2000 \ @@ -1015,7 +1237,8 @@ $ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) echo "Indexing ${FILE}" TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") DOCS=$(cat ${FILE} | jq -r '. 
+ {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -c) - curl -s -X PUT -H 'Content-Type: application/json' "http://127.0.0.1:8000/documents?bulk=true" --data-binary "${DOCS}" + curl -s -X PUT -H 'Content-Type: application/x-ndjson' "http://127.0.0.1:6000/v1/bulk" --data-binary "${DOCS}" + echo "" done ``` diff --git a/cmd/blast/dispatcher_delete.go b/cmd/blast/dispatcher_delete.go index 8466634..255e350 100644 --- a/cmd/blast/dispatcher_delete.go +++ b/cmd/blast/dispatcher_delete.go @@ -16,12 +16,13 @@ package main import ( "bufio" - "encoding/json" + "errors" "fmt" "io" "os" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -30,75 +31,95 @@ func dispatcherDelete(c *cli.Context) error { filePath := c.String("file") id := c.Args().Get(0) - ids := make([]string, 0) - - if id != "" { - ids = append(ids, id) + // create client + client, err := dispatcher.NewGRPCClient(grpcAddr) + if err != nil { + return err } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := dispatcher.JsonMarshaler{} - if filePath != "" { - _, err := os.Stat(filePath) + if id != "" { + req := &distribute.DeleteRequest{ + Id: id, + } + resp, err := client.Delete(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - - // read index mapping file - file, err := os.Open(filePath) + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - defer func() { - _ = file.Close() - }() + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + } else { + if filePath != "" { + ids := make([]string, 0) - reader := bufio.NewReader(file) - for { - docId, err := reader.ReadString('\n') + _, err := os.Stat(filePath) if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId != "" { - ids = append(ids, docId) - } - break + if os.IsNotExist(err) { + // does not exist + return err } + // other error + return err + } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { return err } + defer func() { + _ = file.Close() + }() + + reader := bufio.NewReader(file) + for { + docIdBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } + break + } - if docId != "" { - ids = append(ids, docId) + return err + } + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } } - } - } - // create client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() + req := &distribute.BulkDeleteRequest{ + Ids: ids, + } - result, err := client.DeleteDocument(ids) - if err != nil { - return err - } + resp, err := client.BulkDelete(req) + if err != nil { + return err + } - resultBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return err - } + resultBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/dispatcher_get.go 
b/cmd/blast/dispatcher_get.go index f46c7a7..cc01500 100644 --- a/cmd/blast/dispatcher_get.go +++ b/cmd/blast/dispatcher_get.go @@ -20,7 +20,7 @@ import ( "os" "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/index" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -43,17 +43,22 @@ func dispatcherGet(c *cli.Context) error { } }() - doc, err := client.GetDocument(id) + req := &distribute.GetRequest{ + Id: id, + } + + res, err := client.Get(req) if err != nil { return err } - docBytes, err := index.MarshalDocument(doc) + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_index.go b/cmd/blast/dispatcher_index.go index 86c2dd7..59dd811 100644 --- a/cmd/blast/dispatcher_index.go +++ b/cmd/blast/dispatcher_index.go @@ -16,13 +16,18 @@ package main import ( "bufio" + "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -31,52 +36,62 @@ func dispatcherIndex(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") bulk := c.Bool("bulk") - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - docs := make([]*index.Document, 0) + // create gRPC client + client, err := dispatcher.NewGRPCClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := dispatcher.JsonMarshaler{} + + if c.NArg() >= 2 { + // index document by specifying ID and fields via standard input + id := c.Args().Get(0) + fieldsSrc := c.Args().Get(1) - if id != "" && fieldsSrc != "" { var fieldsMap map[string]interface{} err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) if err != nil { return err } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - docs = append(docs, doc) - } - if filePath != "" { - _, err := os.Stat(filePath) + req := &distribute.IndexRequest{ + Id: id, + Fields: fieldsAny, + } + + res, err := client.Index(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - // read index mapping file - file, err := os.Open(filePath) + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - defer func() { - _ = file.Close() - }() + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else if c.NArg() == 1 { + // index document by specifying document(s) via standard input + docSrc := c.Args().Get(0) if bulk { - reader := bufio.NewReader(file) + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) for { docBytes, err := reader.ReadBytes('\n') if err != nil { @@ -102,44 +117,157 @@ func dispatcherIndex(c *cli.Context) error { docs = append(docs, doc) } } + + req := &distribute.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err 
!= nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } else { - docBytes, err := ioutil.ReadAll(file) + // json + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) if err != nil { return err } - docs = append(docs, doc) - } - } - // create gRPC client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) + req := &distribute.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } - }() + } else { + // index document by specifying document(s) via file + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } - // index documents in bulk - count, err := client.IndexDocument(docs) - if err != nil { - return err - } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() - resultBytes, err := json.MarshalIndent(count, "", " ") - if err != nil { - return err - } + if bulk { + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + + req := &distribute.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else { + // json + docBytes, err := ioutil.ReadAll(file) + if err != nil { + return err + } + var docMap map[string]interface{} + err = json.Unmarshal(docBytes, &docMap) + if err != nil { + return err + } + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) + if err != nil { + return err + } + + req := &distribute.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go index 5fb1b8f..6594ffe 100644 --- a/cmd/blast/dispatcher_node_health.go +++ 
b/cmd/blast/dispatcher_node_health.go @@ -18,9 +18,8 @@ import ( "fmt" "os" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) @@ -41,30 +40,40 @@ func dispatcherNodeHealth(c *cli.Context) error { } }() - var state string + var res *distribute.NodeHealthCheckResponse if healthiness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_LIVENESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_DEAD.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_READINESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_NOT_READY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() + res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_search.go b/cmd/blast/dispatcher_search.go index 976e36a..bf6ccda 100644 --- a/cmd/blast/dispatcher_search.go +++ b/cmd/blast/dispatcher_search.go @@ -16,23 +16,27 @@ package main import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/dispatcher" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" "github.com/urfave/cli" ) func dispatcherSearch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - searchRequestPath := c.String("file") + filePath := c.String("file") searchRequest := bleve.NewSearchRequest(nil) - if searchRequestPath != "" { - _, err := os.Stat(searchRequestPath) + if filePath != "" { + _, err := os.Stat(filePath) if err != nil { if os.IsNotExist(err) { // does not exist @@ -43,23 +47,36 @@ func dispatcherSearch(c *cli.Context) error { } // open file - searchRequestFile, err := os.Open(searchRequestPath) + file, err := os.Open(filePath) if err != nil { return err } defer func() { - _ = searchRequestFile.Close() + _ = file.Close() }() // read file - searchRequestBytes, err := 
ioutil.ReadAll(searchRequestFile) + fileBytes, err := ioutil.ReadAll(file) if err != nil { return err } // create search request - if searchRequestBytes != nil { - err := json.Unmarshal(searchRequestBytes, searchRequest) + if fileBytes != nil { + var tmpValue map[string]interface{} + err = json.Unmarshal(fileBytes, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + err = json.Unmarshal(searchRequestBytes, &searchRequest) if err != nil { return err } @@ -77,17 +94,26 @@ func dispatcherSearch(c *cli.Context) error { } }() - searchResult, err := client.Search(searchRequest) + searchRequestAny := &any.Any{} + err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + return err + } + + req := &distribute.SearchRequest{SearchRequest: searchRequestAny} + + res, err := client.Search(req) if err != nil { return err } - jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") + marshaler := dispatcher.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/dispatcher_start.go b/cmd/blast/dispatcher_start.go index 534bea1..4b61df3 100644 --- a/cmd/blast/dispatcher_start.go +++ b/cmd/blast/dispatcher_start.go @@ -28,6 +28,7 @@ func dispatcherStart(c *cli.Context) error { managerAddr := c.String("manager-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") logLevel := c.GlobalString("log-level") @@ -79,7 +80,7 @@ func dispatcherStart(c *cli.Context) error { httpLogCompress, ) - svr, err := dispatcher.NewServer(managerAddr, grpcAddr, httpAddr, logger, grpcLogger, httpAccessLogger) + svr, err := dispatcher.NewServer(managerAddr, grpcAddr, grpcGatewayAddr, httpAddr, logger, grpcLogger, httpAccessLogger) if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go index 434c011..7963655 100644 --- a/cmd/blast/indexer_cluster_info.go +++ b/cmd/blast/indexer_cluster_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -37,17 +37,20 @@ func indexerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + + resp, err := client.ClusterInfo(req) if err != nil { return err } - clusterBytes, err := json.MarshalIndent(cluster, "", " ") + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) return nil } diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go index e564256..0793229 100644 --- a/cmd/blast/indexer_cluster_leave.go +++ b/cmd/blast/indexer_cluster_leave.go @@ -19,6 +19,7 @@ import ( "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -46,10 +47,22 @@ func indexerClusterLeave(c *cli.Context) error { } }() - err = client.ClusterLeave(nodeId) + req := 
&index.ClusterLeaveRequest{ + Id: nodeId, + } + + resp, err := client.ClusterLeave(req) if err != nil { return err } + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + return nil } diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go index ba99bdb..a991b34 100644 --- a/cmd/blast/indexer_cluster_watch.go +++ b/cmd/blast/indexer_cluster_watch.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "fmt" "io" "log" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" @@ -40,28 +40,31 @@ func indexerClusterWatch(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + marshaler := indexer.JsonMarshaler{} + + req := &empty.Empty{} + clusterInfo, err := client.ClusterInfo(req) if err != nil { return err } resp := &index.ClusterWatchResponse{ Event: 0, Node: nil, - Cluster: cluster, + Cluster: clusterInfo.Cluster, } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - watchClient, err := client.ClusterWatch() + clusterWatchClient, err := client.ClusterWatch(req) if err != nil { return err } for { - resp, err := watchClient.Recv() + resp, err := clusterWatchClient.Recv() if err == io.EOF { break } @@ -69,12 +72,11 @@ func indexerClusterWatch(c *cli.Context) error { log.Println(err.Error()) break } - - clusterBytes, err = json.MarshalIndent(resp, "", " ") + respBytes, err = marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) } return nil diff --git a/cmd/blast/indexer_delete.go b/cmd/blast/indexer_delete.go index 7c1e1bd..b8aa834 100644 --- a/cmd/blast/indexer_delete.go +++ b/cmd/blast/indexer_delete.go @@ -16,12 +16,13 @@ package main import ( "bufio" - "encoding/json" + "errors" "fmt" "io" "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -30,75 +31,95 @@ func indexerDelete(c *cli.Context) error { filePath := c.String("file") id := c.Args().Get(0) - ids := make([]string, 0) - - if id != "" { - ids = append(ids, id) + // create client + client, err := indexer.NewGRPCClient(grpcAddr) + if err != nil { + return err } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() + + marshaler := indexer.JsonMarshaler{} - if filePath != "" { - _, err := os.Stat(filePath) + if id != "" { + req := &index.DeleteRequest{ + Id: id, + } + resp, err := client.Delete(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - - // read index mapping file - file, err := os.Open(filePath) + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - defer func() { - _ = file.Close() - }() + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) + } else { + if filePath != "" { + ids := make([]string, 0) - reader := bufio.NewReader(file) - for { - docId, err := reader.ReadString('\n') + _, err := os.Stat(filePath) if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - 
if docId != "" { - ids = append(ids, docId) - } - break + if os.IsNotExist(err) { + // does not exist + return err } + // other error + return err + } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { return err } + defer func() { + _ = file.Close() + }() + + reader := bufio.NewReader(file) + for { + docIdBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } + break + } - if docId != "" { - ids = append(ids, docId) + return err + } + docId := string(docIdBytes) + if docId != "" { + ids = append(ids, docId) + } } - } - } - // create client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() + req := &index.BulkDeleteRequest{ + Ids: ids, + } - result, err := client.DeleteDocument(ids) - if err != nil { - return err - } + resp, err := client.BulkDelete(req) + if err != nil { + return err + } - resultBytes, err := json.MarshalIndent(result, "", " ") - if err != nil { - return err - } + resultBytes, err := marshaler.Marshal(resp) + if err != nil { + return err + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go index 53abb27..976e4be 100644 --- a/cmd/blast/indexer_get.go +++ b/cmd/blast/indexer_get.go @@ -43,17 +43,22 @@ func indexerGet(c *cli.Context) error { } }() - doc, err := client.GetDocument(id) + req := &index.GetRequest{ + Id: id, + } + + resp, err := client.Get(req) if err != nil { return err } - docBytes, err := index.MarshalDocument(doc) + marshaler := indexer.JsonMarshaler{} + respBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(docBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) return nil } diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go index c70e6ea..7f5521c 100644 --- a/cmd/blast/indexer_index.go +++ b/cmd/blast/indexer_index.go @@ -16,15 +16,18 @@ package main import ( "bufio" + "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" - "github.com/mosuka/blast/protobuf/index" - + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -32,52 +35,62 @@ func indexerIndex(c *cli.Context) error { grpcAddr := c.String("grpc-address") filePath := c.String("file") bulk := c.Bool("bulk") - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - docs := make([]*index.Document, 0) + // create gRPC client + client, err := indexer.NewGRPCClient(grpcAddr) + if err != nil { + return err + } + defer func() { + err := client.Close() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + } + }() - if id != "" && fieldsSrc != "" { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) + marshaler := indexer.JsonMarshaler{} + + if c.NArg() >= 2 { + // index document by specifying ID and fields via standard input + id := c.Args().Get(0) + docSrc := c.Args().Get(1) + + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - 
docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"], fieldsAny) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - docs = append(docs, doc) - } - if filePath != "" { - _, err := os.Stat(filePath) + req := &index.IndexRequest{ + Id: id, + Fields: fieldsAny, + } + + res, err := client.Index(req) if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error return err } - // read index mapping file - file, err := os.Open(filePath) + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - defer func() { - _ = file.Close() - }() + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else if c.NArg() == 1 { + // index document by specifying document(s) via standard input + docSrc := c.Args().Get(0) if bulk { - reader := bufio.NewReader(file) + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) for { docBytes, err := reader.ReadBytes('\n') if err != nil { @@ -103,44 +116,157 @@ func indexerIndex(c *cli.Context) error { docs = append(docs, doc) } } + + req := &index.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } else { - docBytes, err := ioutil.ReadAll(file) + // json + var docMap map[string]interface{} + err := json.Unmarshal([]byte(docSrc), &docMap) if err != nil { return err } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) if err != nil { return err } - docs = append(docs, doc) - } - } - // create gRPC client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) + req := &index.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } + + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } - }() + } else { + // index document by specifying document(s) via file + if filePath != "" { + _, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + // does not exist + return err + } + // other error + return err + } - // index documents in bulk - count, err := client.IndexDocument(docs) - if err != nil { - return err - } + // read index mapping file + file, err := os.Open(filePath) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() - resultBytes, err := json.MarshalIndent(count, "", " ") - if err != nil { - return err - } + if bulk { + // jsonl + docs := make([]*index.Document, 0) + reader := bufio.NewReader(file) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) 
+ if err != nil { + return err + } + docs = append(docs, doc) + } + } + + req := &index.BulkIndexRequest{ + Documents: docs, + } + res, err := client.BulkIndex(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } else { + // json + docBytes, err := ioutil.ReadAll(file) + if err != nil { + return err + } + var docMap map[string]interface{} + err = json.Unmarshal(docBytes, &docMap) + if err != nil { + return err + } + + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) + if err != nil { + return err + } + + req := &index.IndexRequest{ + Id: docMap["id"].(string), + Fields: fieldsAny, + } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) + res, err := client.Index(req) + if err != nil { + return err + } + + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + } + } else { + return errors.New("argument error") + } + } return nil } diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go index aedb6eb..e818992 100644 --- a/cmd/blast/indexer_node_health.go +++ b/cmd/blast/indexer_node_health.go @@ -40,30 +40,40 @@ func indexerNodeHealth(c *cli.Context) error { } }() - var state string + var res *index.NodeHealthCheckResponse if healthiness { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_UNHEALTHY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_DEAD.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_NOT_READY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = index.NodeHealthCheckResponse_UNHEALTHY.String() + res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go index 0ab3ad5..610403f 100644 --- a/cmd/blast/indexer_node_info.go +++ b/cmd/blast/indexer_node_info.go @@ -15,10 +15,10 @@ package main import ( - 
"encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -37,12 +37,16 @@ func indexerNodeInfo(c *cli.Context) error { } }() - node, err := client.NodeInfo() + req := &empty.Empty{} + + res, err := client.NodeInfo(req) if err != nil { return err } - nodeBytes, err := json.MarshalIndent(node, "", " ") + marshaler := indexer.JsonMarshaler{} + + nodeBytes, err := marshaler.Marshal(res) if err != nil { return err } diff --git a/cmd/blast/indexer_search.go b/cmd/blast/indexer_search.go index 5c250dc..2a7d4b0 100644 --- a/cmd/blast/indexer_search.go +++ b/cmd/blast/indexer_search.go @@ -16,23 +16,27 @@ package main import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerSearch(c *cli.Context) error { grpcAddr := c.String("grpc-address") - searchRequestPath := c.String("file") + filePath := c.String("file") searchRequest := bleve.NewSearchRequest(nil) - if searchRequestPath != "" { - _, err := os.Stat(searchRequestPath) + if filePath != "" { + _, err := os.Stat(filePath) if err != nil { if os.IsNotExist(err) { // does not exist @@ -43,23 +47,36 @@ func indexerSearch(c *cli.Context) error { } // open file - searchRequestFile, err := os.Open(searchRequestPath) + file, err := os.Open(filePath) if err != nil { return err } defer func() { - _ = searchRequestFile.Close() + _ = file.Close() }() // read file - searchRequestBytes, err := ioutil.ReadAll(searchRequestFile) + fileBytes, err := ioutil.ReadAll(file) if err != nil { return err } // create search request - if searchRequestBytes != nil { - err := json.Unmarshal(searchRequestBytes, searchRequest) + if fileBytes != nil { + var tmpValue map[string]interface{} + err = json.Unmarshal(fileBytes, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("search_request does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + err = json.Unmarshal(searchRequestBytes, &searchRequest) if err != nil { return err } @@ -77,17 +94,26 @@ func indexerSearch(c *cli.Context) error { } }() - searchResult, err := client.Search(searchRequest) + searchRequestAny := &any.Any{} + err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + return err + } + + req := &index.SearchRequest{SearchRequest: searchRequestAny} + + res, err := client.Search(req) if err != nil { return err } - jsonBytes, err := json.MarshalIndent(&searchResult, "", " ") + marshaler := indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(jsonBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/indexer_snapshot.go b/cmd/blast/indexer_snapshot.go index c34459a..bad2cf5 100644 --- a/cmd/blast/indexer_snapshot.go +++ b/cmd/blast/indexer_snapshot.go @@ -18,6 +18,7 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/urfave/cli" ) @@ -36,10 +37,20 @@ func indexerSnapshot(c *cli.Context) error { } }() - err = client.Snapshot() + req := &empty.Empty{} + + res, err := client.Snapshot(req) + if err != nil { + return err + } + + marshaler := 
indexer.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go index b20e689..d01b076 100644 --- a/cmd/blast/indexer_start.go +++ b/cmd/blast/indexer_start.go @@ -33,6 +33,7 @@ func indexerStart(c *cli.Context) error { peerGRPCAddr := c.String("peer-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") nodeId := c.String("node-id") @@ -98,8 +99,9 @@ func indexerStart(c *cli.Context) error { BindAddress: nodeAddr, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddr, - HttpAddress: httpAddr, + GrpcAddress: grpcAddr, + GrpcGatewayAddress: grpcGatewayAddr, + HttpAddress: httpAddr, }, } diff --git a/cmd/blast/main.go b/cmd/blast/main.go index e889f32..7183f17 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -57,6 +57,12 @@ func main() { EnvVar: "BLAST_MANAGER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6100", + EnvVar: "BLAST_MANAGER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8100", @@ -399,6 +405,12 @@ func main() { EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6000", + EnvVar: "BLAST_INDEXER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8000", @@ -758,6 +770,12 @@ func main() { EnvVar: "BLAST_DISPATCHER_GRPC_ADDRESS", Usage: "The gRPC listen address", }, + cli.StringFlag{ + Name: "grpc-gateway-address", + Value: ":6200", + EnvVar: "BLAST_DISPATCHER_GRPC_GATEWAY_ADDRESS", + Usage: "The gRPC gateway listen address", + }, cli.StringFlag{ Name: "http-address", Value: ":8200", diff --git a/cmd/blast/manager_cluster_info.go b/cmd/blast/manager_cluster_info.go index 2ccc08d..8b0a25a 100644 --- a/cmd/blast/manager_cluster_info.go +++ b/cmd/blast/manager_cluster_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -37,17 +37,19 @@ func managerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { return err } - clusterBytes, err := json.MarshalIndent(cluster, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go index 408f0ec..12ae8e1 100644 --- a/cmd/blast/manager_cluster_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -19,6 +19,7 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -42,10 +43,21 @@ func managerClusterLeave(c *cli.Context) error { } }() - err = client.ClusterLeave(nodeId) + req := &management.ClusterLeaveRequest{ + Id: nodeId, + } + res, err := client.ClusterLeave(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := 
marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go index 775350b..320965c 100644 --- a/cmd/blast/manager_cluster_watch.go +++ b/cmd/blast/manager_cluster_watch.go @@ -15,12 +15,12 @@ package main import ( - "encoding/json" "fmt" "io" "log" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" @@ -40,28 +40,31 @@ func managerClusterWatch(c *cli.Context) error { } }() - cluster, err := client.ClusterInfo() + marshaler := manager.JsonMarshaler{} + + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { return err } resp := &management.ClusterWatchResponse{ Event: 0, Node: nil, - Cluster: cluster, + Cluster: res.Cluster, } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + resBytes, err := marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - watchClient, err := client.ClusterWatch() + watchClient, err := client.ClusterWatch(req) if err != nil { return err } for { - resp, err := watchClient.Recv() + resp, err = watchClient.Recv() if err == io.EOF { break } @@ -70,11 +73,11 @@ func managerClusterWatch(c *cli.Context) error { break } - clusterBytes, err := json.MarshalIndent(resp, "", " ") + resBytes, err = marshaler.Marshal(resp) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) } return nil diff --git a/cmd/blast/manager_delete.go b/cmd/blast/manager_delete.go index e6c41e6..0caf391 100644 --- a/cmd/blast/manager_delete.go +++ b/cmd/blast/manager_delete.go @@ -20,6 +20,7 @@ import ( "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -43,10 +44,21 @@ func managerDelete(c *cli.Context) error { } }() - err = client.Delete(key) + req := &management.DeleteRequest{ + Key: key, + } + res, err := client.Delete(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_get.go b/cmd/blast/manager_get.go index f0e3fe7..6b41f0e 100644 --- a/cmd/blast/manager_get.go +++ b/cmd/blast/manager_get.go @@ -15,11 +15,11 @@ package main import ( - "encoding/json" "fmt" "os" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -39,16 +39,22 @@ func managerGet(c *cli.Context) error { } }() - value, err := client.Get(key) + req := &management.GetRequest{ + Key: key, + } + + res, err := client.Get(req) if err != nil { return err } - valueBytes, err := json.MarshalIndent(value, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(valueBytes))) + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_node_health.go b/cmd/blast/manager_node_health.go index 9967f91..e2eb209 100644 --- a/cmd/blast/manager_node_health.go +++ b/cmd/blast/manager_node_health.go @@ 
-40,30 +40,40 @@ func managerNodeHealthCheck(c *cli.Context) error { } }() - var state string + var res *management.NodeHealthCheckResponse if healthiness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_UNHEALTHY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} } } else if liveness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_DEAD.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_DEAD} } } else if readiness { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_NOT_READY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_NOT_READY} } } else { - state, err = client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + res, err = client.NodeHealthCheck(req) if err != nil { - state = management.NodeHealthCheckResponse_UNHEALTHY.String() + res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} } } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index 85314a2..ca190e1 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -15,10 +15,10 @@ package main import ( - "encoding/json" "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -37,17 +37,19 @@ func managerNodeInfo(c *cli.Context) error { } }() - node, err := client.NodeInfo() + req := &empty.Empty{} + res, err := client.NodeInfo(req) if err != nil { return err } - nodeBytes, err := json.MarshalIndent(node, "", " ") + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) return nil } diff --git a/cmd/blast/manager_set.go b/cmd/blast/manager_set.go index 02c3fa1..f7bdac8 100644 --- a/cmd/blast/manager_set.go +++ b/cmd/blast/manager_set.go @@ -20,7 +20,10 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -61,10 +64,29 @@ func managerSet(c *cli.Context) error { } }() - err = client.Set(key, value) + valueAny := &any.Any{} + err = protobuf.UnmarshalAny(value, valueAny) if err != nil { return err } + req := 
&management.SetRequest{ + Key: key, + Value: valueAny, + } + + res, err := client.Set(req) + if err != nil { + return err + } + + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_snapshot.go b/cmd/blast/manager_snapshot.go index 8dd9b71..f252e34 100644 --- a/cmd/blast/manager_snapshot.go +++ b/cmd/blast/manager_snapshot.go @@ -18,6 +18,7 @@ import ( "fmt" "os" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/manager" "github.com/urfave/cli" ) @@ -36,10 +37,19 @@ func managerSnapshot(c *cli.Context) error { } }() - err = client.Snapshot() + req := &empty.Empty{} + res, err := client.Snapshot(req) if err != nil { return err } + marshaler := manager.JsonMarshaler{} + resBytes, err := marshaler.Marshal(res) + if err != nil { + return err + } + + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) + return nil } diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go index 81385ab..94a7445 100644 --- a/cmd/blast/manager_start.go +++ b/cmd/blast/manager_start.go @@ -31,6 +31,7 @@ func managerStart(c *cli.Context) error { peerGrpcAddr := c.String("peer-grpc-address") grpcAddr := c.String("grpc-address") + grpcGatewayAddr := c.String("grpc-gateway-address") httpAddr := c.String("http-address") nodeId := c.String("node-id") @@ -96,8 +97,9 @@ func managerStart(c *cli.Context) error { BindAddress: nodeAddr, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddr, - HttpAddress: httpAddr, + GrpcAddress: grpcAddr, + GrpcGatewayAddress: grpcGatewayAddr, + HttpAddress: httpAddr, }, } diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go index bab09af..ff010df 100644 --- a/cmd/blast/manager_watch.go +++ b/cmd/blast/manager_watch.go @@ -15,15 +15,13 @@ package main import ( - "encoding/json" - "errors" "fmt" "io" "log" "os" "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" "github.com/urfave/cli" ) @@ -43,11 +41,16 @@ func managerWatch(c *cli.Context) error { } }() - watchClient, err := client.Watch(key) + req := &management.WatchRequest{ + Key: key, + } + watchClient, err := client.Watch(req) if err != nil { return err } + marshaler := manager.JsonMarshaler{} + for { resp, err := watchClient.Recv() if err == io.EOF { @@ -58,29 +61,13 @@ func managerWatch(c *cli.Context) error { break } - value, err := protobuf.MarshalAny(resp.Value) + respBytes, err := marshaler.Marshal(resp) if err != nil { - return err - } - if value == nil { - return errors.New("nil") + log.Println(err.Error()) + break } - var valueBytes []byte - switch value.(type) { - case *map[string]interface{}: - valueMap := *value.(*map[string]interface{}) - valueBytes, err = json.Marshal(valueMap) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, string(valueBytes))) - case *string: - valueStr := *value.(*string) - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %s", resp.Command.String(), resp.Key, valueStr)) - default: - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%s %s %v", resp.Command.String(), resp.Key, &value)) - } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) } return nil diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go index b7cf07d..5ca4658 100644 --- 
a/dispatcher/grpc_client.go +++ b/dispatcher/grpc_client.go @@ -16,18 +16,11 @@ package dispatcher import ( "context" - "errors" "math" - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type GRPCClient struct { @@ -96,134 +89,30 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &distribute.NodeHealthCheckRequest{} - - switch probe { - case distribute.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS - case distribute.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_LIVENESS - case distribute.NodeHealthCheckRequest_READINESS.String(): - req.Probe = distribute.NodeHealthCheckRequest_READINESS - default: - req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return distribute.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *distribute.NodeHealthCheckRequest, opts ...grpc.CallOption) (*distribute.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { - req := &distribute.GetDocumentRequest{ - Id: id, - } - - resp, err := c.client.GetDocument(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - return resp.Document, nil +func (c *GRPCClient) Get(req *distribute.GetRequest, opts ...grpc.CallOption) (*distribute.GetResponse, error) { + return c.client.Get(c.ctx, req, opts...) } -func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { - // bleve.SearchRequest -> Any - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return nil, err - } - - req := &distribute.SearchRequest{ - SearchRequest: searchRequestAny, - } - - resp, err := c.client.Search(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - // Any -> bleve.SearchResult - searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - if searchResultInstance == nil { - return nil, errors.New("nil") - } - searchResult := searchResultInstance.(*bleve.SearchResult) - - return searchResult, nil +func (c *GRPCClient) Index(req *distribute.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Index(c.ctx, req, opts...) } -func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.IndexDocument(c.ctx, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, doc := range docs { - req := &distribute.IndexDocumentRequest{ - Document: doc, - } - - err = stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) Delete(req *distribute.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Delete(c.ctx, req, opts...) } -func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.DeleteDocument(c.ctx, opts...) - if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, id := range ids { - req := &distribute.DeleteDocumentRequest{ - Id: id, - } - - err := stream.Send(req) - if err != nil { - return -1, err - } - } +func (c *GRPCClient) BulkIndex(req *distribute.BulkIndexRequest, opts ...grpc.CallOption) (*distribute.BulkIndexResponse, error) { + return c.client.BulkIndex(c.ctx, req, opts...) +} - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } +func (c *GRPCClient) BulkDelete(req *distribute.BulkDeleteRequest, opts ...grpc.CallOption) (*distribute.BulkDeleteResponse, error) { + return c.client.BulkDelete(c.ctx, req, opts...) +} - return int(resp.Count), nil +func (c *GRPCClient) Search(req *distribute.SearchRequest, opts ...grpc.CallOption) (*distribute.SearchResponse, error) { + return c.client.Search(c.ctx, req, opts...) } diff --git a/dispatcher/grpc_gateway.go b/dispatcher/grpc_gateway.go new file mode 100644 index 0000000..f962b4e --- /dev/null +++ b/dispatcher/grpc_gateway.go @@ -0,0 +1,353 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dispatcher + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/protobuf/index" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". 
+func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *distribute.GetResponse: + value, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "fields": value, + }, + ) + case *distribute.SearchResponse: + value, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "search_result": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.IndexRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + id, ok := tmpValue["id"].(string) + if ok { + v.(*distribute.IndexRequest).Id = id + } + + fields, ok := tmpValue["fields"] + if !ok { + return errors.New("value does not exist") + } + v.(*distribute.IndexRequest).Fields = &any.Any{} + return protobuf.UnmarshalAny(fields, v.(*distribute.IndexRequest).Fields) + case *distribute.SearchRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + var searchRequest *bleve.SearchRequest + err = json.Unmarshal(searchRequestBytes, &searchRequest) + if err != nil { + return err + } + v.(*distribute.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*distribute.SearchRequest).SearchRequest) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type JsonlMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonlMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". 
+func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.BulkIndexRequest: + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + v.(*distribute.BulkIndexRequest).Documents = docs + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonlMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type TextMarshaler struct{} + +// ContentType always Returns "application/json". +func (*TextMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads text stream from "r". +func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *distribute.BulkDeleteRequest: + ids := make([]string, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + //idBytes, err := reader.ReadBytes('\n') + idBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + break + } + } + + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + } + v.(*distribute.BulkDeleteRequest).Ids = ids + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *TextMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), + runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := distribute.RegisterDistributeHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index e49e63d..0657119 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -28,6 +28,7 @@ import ( "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/manager" "github.com/mosuka/blast/protobuf" @@ -145,13 +146,14 @@ func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster return nil, err } - managers, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { s.logger.Error(err.Error()) return nil, err } - return managers, nil + return res.Cluster, nil } func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { @@ -191,7 +193,8 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } // create stream for watching cluster changes - stream, err := client.ClusterWatch() + req := &empty.Empty{} + stream, err := client.ClusterWatch(req) if err != nil { s.logger.Error(err.Error()) continue @@ -199,10 +202,10 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Info("wait for receive a manager cluster updates from stream") resp, err := stream.Recv() - //if err == io.EOF { - // s.logger.Info(err.Error()) - // continue - //} + if err == io.EOF { + s.logger.Info(err.Error()) + continue + } if err != nil { s.logger.Error(err.Error()) continue @@ -318,31 +321,36 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } // get initial indexers - shards, err := client.Get("/cluster/shards") + req := &management.GetRequest{ + Key: "/cluster/shards", + } + res, err := client.Get(req) if err != nil { s.logger.Fatal(err.Error()) return } - if shards == nil { + if res.Value == nil { 
s.logger.Error("/cluster/shards is nil") } - for shardId, shardIntr := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shardIntr) + + shards, err := protobuf.MarshalAny(res.Value) + for shardId, shard := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shard) if err != nil { s.logger.Error(err.Error()) continue } - var shard *index.Cluster - err = json.Unmarshal(shardBytes, &shard) + var cluster *index.Cluster + err = json.Unmarshal(shardBytes, &cluster) if err != nil { s.logger.Error(err.Error()) continue } - s.indexers[shardId] = shard + s.indexers[shardId] = cluster - for nodeId, node := range shard.Nodes { + for nodeId, node := range cluster.Nodes { if node.Metadata.GrpcAddress == "" { s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue @@ -371,7 +379,10 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { continue } - stream, err := client.Watch("/cluster/shards/") + watchReq := &management.WatchRequest{ + Key: "/cluster/shards/", + } + stream, err := client.Watch(watchReq) if err != nil { s.logger.Error(err.Error()) continue @@ -388,37 +399,42 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - shardsIntr, err := client.Get("/cluster/shards/") + getReq := &management.GetRequest{ + Key: "/cluster/shards/", + } + res, err := client.Get(getReq) if err != nil { s.logger.Error(err.Error()) continue } - if shardsIntr == nil { + if res.Value == nil { s.logger.Error("/cluster/shards is nil") continue } - for shardId, shardIntr := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shardIntr) + + shards, err := protobuf.MarshalAny(res.Value) + for shardId, shard := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shard) if err != nil { s.logger.Error(err.Error()) continue } - var shard *index.Cluster - err = json.Unmarshal(shardBytes, &shard) + var cluster *index.Cluster + err = json.Unmarshal(shardBytes, &cluster) if err != nil { s.logger.Error(err.Error()) continue } - s.indexers[shardId] = shard + s.indexers[shardId] = cluster if _, exist := s.indexerClients[shardId]; !exist { s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) } // open clients for indexer nodes - for nodeId, node := range shard.Nodes { + for nodeId, node := range cluster.Nodes { if node.Metadata.GrpcAddress == "" { s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue @@ -514,18 +530,24 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeH resp := &distribute.NodeHealthCheckResponse{} switch req.Probe { + case distribute.NodeHealthCheckRequest_UNKNOWN: + fallthrough case distribute.NodeHealthCheckRequest_HEALTHINESS: resp.State = distribute.NodeHealthCheckResponse_HEALTHY case distribute.NodeHealthCheckRequest_LIVENESS: resp.State = distribute.NodeHealthCheckResponse_ALIVE case distribute.NodeHealthCheckRequest_READINESS: resp.State = distribute.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil } -func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocumentRequest) 
(*distribute.GetDocumentResponse, error) { +func (s *GRPCService) Get(ctx context.Context, req *distribute.GetRequest) (*distribute.GetResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -537,7 +559,7 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume type respVal struct { clusterId string - doc *index.Document + res *index.GetResponse err error } @@ -549,11 +571,15 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume wg.Add(1) go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { // index documents - doc, err := client.GetDocument(id) + req := &index.GetRequest{ + Id: id, + } + res, err := client.Get(req) + wg.Done() respChan <- respVal{ clusterId: clusterId, - doc: doc, + res: res, err: err, } }(clusterId, client, req.Id, respChan) @@ -564,28 +590,35 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *distribute.GetDocume close(respChan) // summarize responses - var doc *index.Document + iRes := &index.GetResponse{} for r := range respChan { - if r.doc != nil { - doc = r.doc + if r.res != nil { + iRes = r.res } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - resp := &distribute.GetDocumentResponse{} - - // response - resp.Document = doc + resp := &distribute.GetResponse{ + Fields: iRes.Fields, + } return resp, nil } -func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { - start := time.Now() +func (s *GRPCService) docIdHash(docId string) uint64 { + hash := fnv.New64() + _, err := hash.Write([]byte(docId)) + if err != nil { + return 0 + } - resp := &distribute.SearchResponse{} + return hash.Sum64() +} + +func (s *GRPCService) Index(ctx context.Context, req *distribute.IndexRequest) (*empty.Empty, error) { + res := &empty.Empty{} indexerClients := s.getIndexerClients() @@ -596,127 +629,73 @@ func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) sort.Strings(clusterIds) } - type respVal struct { - clusterId string - searchResult *bleve.SearchResult - err error - } + docIdHash := s.docIdHash(req.Id) + clusterNum := uint64(len(indexerClients)) + clusterId := clusterIds[int(docIdHash%clusterNum)] - // create response channel - respChan := make(chan respVal, len(clusterIds)) + iReq := &index.IndexRequest{ + Id: req.Id, + Fields: req.Fields, + } - // create search request - ins, err := protobuf.MarshalAny(req.SearchRequest) + res, err := indexerClients[clusterId].Index(iReq) if err != nil { s.logger.Error(err.Error()) - return resp, err + return res, status.Error(codes.Internal, err.Error()) } - searchRequest := ins.(*bleve.SearchRequest) - // change to distributed search request - from := searchRequest.From - size := searchRequest.Size - searchRequest.From = 0 - searchRequest.Size = from + size + return res, nil +} + +func (s *GRPCService) Delete(ctx context.Context, req *distribute.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + indexerClients := s.getIndexerClients() + + // cluster id list sorted by cluster id + clusterIds := make([]string, 0) + for clusterId := range indexerClients { + clusterIds = append(clusterIds, clusterId) + sort.Strings(clusterIds) + } + + type respVal struct { + clusterId string + err error + } + + // create response channel + respChan := make(chan respVal, len(clusterIds)) wg := &sync.WaitGroup{} for clusterId, client := range indexerClients { wg.Add(1) - 
go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { - searchResult, err := client.Search(searchRequest) + go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { + // index documents + iReq := &index.DeleteRequest{Id: id} + _, err := client.Delete(iReq) wg.Done() respChan <- respVal{ - clusterId: clusterId, - searchResult: searchResult, - err: err, + clusterId: clusterId, + err: err, } - }(clusterId, client, searchRequest, respChan) + }(clusterId, client, req.Id, respChan) } wg.Wait() // close response channel close(respChan) - // revert to original search request - searchRequest.From = from - searchRequest.Size = size - - // summarize responses - var searchResult *bleve.SearchResult for r := range respChan { - if r.searchResult != nil { - if searchResult == nil { - searchResult = r.searchResult - } else { - searchResult.Merge(r.searchResult) - } - } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - // handle case where no results were successful - if searchResult == nil { - searchResult = &bleve.SearchResult{ - Status: &bleve.SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - // sort all hits with the requested order - if len(searchRequest.Sort) > 0 { - sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) - sort.Sort(sorter) - } - - // now skip over the correct From - if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { - searchResult.Hits = searchResult.Hits[searchRequest.From:] - } else if searchRequest.From > 0 { - searchResult.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { - searchResult.Hits = searchResult.Hits[0:searchRequest.Size] - } - - // fix up facets - for name, fr := range searchRequest.Facets { - searchResult.Facets.Fixup(name, fr.Size) - } - - // fix up original request - searchResult.Request = searchRequest - searchDuration := time.Since(start) - searchResult.Took = searchDuration - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.SearchResult = searchResultAny - return resp, nil } -func (s *GRPCService) docIdHash(docId string) uint64 { - hash := fnv.New64() - _, err := hash.Write([]byte(docId)) - if err != nil { - return 0 - } - - return hash.Sum64() -} - -func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentServer) error { +func (s *GRPCService) BulkIndex(ctx context.Context, req *distribute.BulkIndexRequest) (*distribute.BulkIndexResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -732,27 +711,17 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe docSet[clusterId] = make([]*index.Document, 0) } - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - + for _, doc := range req.Documents { // distribute documents to each cluster based on document id - docIdHash := s.docIdHash(req.Document.Id) + docIdHash := s.docIdHash(doc.Id) clusterNum := uint64(len(indexerClients)) clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], 
req.Document) + docSet[clusterId] = append(docSet[clusterId], doc) } type respVal struct { clusterId string - count int + res *index.BulkIndexResponse err error } @@ -763,11 +732,14 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe for clusterId, docs := range docSet { wg.Add(1) go func(clusterId string, docs []*index.Document, respChan chan respVal) { - count, err := indexerClients[clusterId].IndexDocument(docs) + iReq := &index.BulkIndexRequest{ + Documents: docs, + } + iRes, err := indexerClients[clusterId].BulkIndex(iReq) wg.Done() respChan <- respVal{ clusterId: clusterId, - count: count, + res: iRes, err: err, } }(clusterId, docs, respChan) @@ -780,8 +752,8 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe // summarize responses totalCount := 0 for r := range respChan { - if r.count >= 0 { - totalCount += r.count + if r.res.Count >= 0 { + totalCount += int(r.res.Count) } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) @@ -789,14 +761,12 @@ func (s *GRPCService) IndexDocument(stream distribute.Distribute_IndexDocumentSe } // response - resp := &distribute.IndexDocumentResponse{ + return &distribute.BulkIndexResponse{ Count: int32(totalCount), - } - - return stream.SendAndClose(resp) + }, nil } -func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocumentServer) error { +func (s *GRPCService) BulkDelete(ctx context.Context, req *distribute.BulkDeleteRequest) (*distribute.BulkDeleteResponse, error) { indexerClients := s.getIndexerClients() // cluster id list sorted by cluster id @@ -806,25 +776,9 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument sort.Strings(clusterIds) } - ids := make([]string, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - ids = append(ids, req.Id) - } - type respVal struct { clusterId string - count int + res *index.BulkDeleteResponse err error } @@ -836,14 +790,17 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument wg.Add(1) go func(clusterId string, client *indexer.GRPCClient, ids []string, respChan chan respVal) { // index documents - count, err := client.DeleteDocument(ids) + iReq := &index.BulkDeleteRequest{ + Ids: ids, + } + iRes, err := client.BulkDelete(iReq) wg.Done() respChan <- respVal{ clusterId: clusterId, - count: count, + res: iRes, err: err, } - }(clusterId, client, ids, respChan) + }(clusterId, client, req.Ids, respChan) } wg.Wait() @@ -851,17 +808,167 @@ func (s *GRPCService) DeleteDocument(stream distribute.Distribute_DeleteDocument close(respChan) // summarize responses - totalCount := len(ids) + totalCount := 0 for r := range respChan { + if r.res.Count >= 0 { + totalCount += int(r.res.Count) + } if r.err != nil { s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) } } - // response - resp := &distribute.DeleteDocumentResponse{ + return &distribute.BulkDeleteResponse{ Count: int32(totalCount), + }, nil +} + +func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { + start := time.Now() + + resp := &distribute.SearchResponse{} + + indexerClients := s.getIndexerClients() + + // cluster id list sorted by cluster id + clusterIds := make([]string, 0) + for clusterId := range indexerClients { + clusterIds = 
append(clusterIds, clusterId) + sort.Strings(clusterIds) + } + + type respVal struct { + clusterId string + searchResult *bleve.SearchResult + err error } - return stream.SendAndClose(resp) + // create response channel + respChan := make(chan respVal, len(clusterIds)) + + // create search request + ins, err := protobuf.MarshalAny(req.SearchRequest) + if err != nil { + s.logger.Error(err.Error()) + return resp, err + } + searchRequest := ins.(*bleve.SearchRequest) + + // change to distributed search request + from := searchRequest.From + size := searchRequest.Size + searchRequest.From = 0 + searchRequest.Size = from + size + + wg := &sync.WaitGroup{} + for clusterId, client := range indexerClients { + wg.Add(1) + go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { + searchRequestAny := &any.Any{} + err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) + if err != nil { + respChan <- respVal{ + clusterId: clusterId, + searchResult: nil, + err: err, + } + return + } + + iReq := &index.SearchRequest{ + SearchRequest: searchRequestAny, + } + + iRes, err := client.Search(iReq) + + searchResult, err := protobuf.MarshalAny(iRes.SearchResult) + if err != nil { + respChan <- respVal{ + clusterId: clusterId, + searchResult: nil, + err: err, + } + return + } + + wg.Done() + respChan <- respVal{ + clusterId: clusterId, + searchResult: searchResult.(*bleve.SearchResult), + err: err, + } + }(clusterId, client, searchRequest, respChan) + } + wg.Wait() + + // close response channel + close(respChan) + + // revert to original search request + searchRequest.From = from + searchRequest.Size = size + + // summarize responses + var searchResult *bleve.SearchResult + for r := range respChan { + if r.searchResult != nil { + if searchResult == nil { + searchResult = r.searchResult + } else { + searchResult.Merge(r.searchResult) + } + } + if r.err != nil { + s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) + } + } + + // handle case where no results were successful + if searchResult == nil { + searchResult = &bleve.SearchResult{ + Status: &bleve.SearchStatus{ + Errors: make(map[string]error), + }, + } + } + + // sort all hits with the requested order + if len(searchRequest.Sort) > 0 { + sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) + sort.Sort(sorter) + } + + // now skip over the correct From + if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { + searchResult.Hits = searchResult.Hits[searchRequest.From:] + } else if searchRequest.From > 0 { + searchResult.Hits = search.DocumentMatchCollection{} + } + + // now trim to the correct size + if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { + searchResult.Hits = searchResult.Hits[0:searchRequest.Size] + } + + // fix up facets + for name, fr := range searchRequest.Facets { + searchResult.Facets.Fixup(name, fr.Size) + } + + // fix up original request + searchResult.Request = searchRequest + searchDuration := time.Since(start) + searchResult.Took = searchDuration + + searchResultAny := &any.Any{} + err = protobuf.UnmarshalAny(searchResult, searchResultAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, err + } + + // response + resp.SearchResult = searchResultAny + + return resp, nil } diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go index dec3163..3e2ec1b 100644 --- a/dispatcher/http_handler.go +++ b/dispatcher/http_handler.go @@ -15,20 +15,11 @@ package 
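The hunks above replace the old client-streaming `IndexDocument`/`DeleteDocument` RPCs on the dispatcher with unary `BulkIndex`/`BulkDelete` (and a unary `Search`), so that grpc-gateway can expose them over REST. The sketch below shows how a client might drive the new unary API. It is illustrative only: the dial address is made up, `NewDistributeClient` is assumed to be the constructor generated for the `Distribute` service, and document fields are omitted.

```go
// Minimal sketch of calling the new unary bulk RPCs on the dispatcher.
// Addresses and the generated constructor name are assumptions, not part of this patch.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/mosuka/blast/protobuf/distribute"
	"github.com/mosuka/blast/protobuf/index"
)

func main() {
	// Dispatcher gRPC address; the port here is illustrative only.
	conn, err := grpc.Dial(":5001", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Assumes the client constructor generated for the Distribute service.
	client := distribute.NewDistributeClient(conn)

	// BulkIndex: the whole batch travels in one unary request instead of a client stream.
	// Document fields are omitted here for brevity.
	indexRes, err := client.BulkIndex(context.Background(), &distribute.BulkIndexRequest{
		Documents: []*index.Document{{Id: "enwiki_1"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("indexed: %d", indexRes.Count)

	// BulkDelete: ids are likewise sent as a single request.
	deleteRes, err := client.BulkDelete(context.Background(), &distribute.BulkDeleteRequest{
		Ids: []string{"enwiki_1"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted: %d", deleteRes.Count)
}
```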
dispatcher import ( - "bufio" - "encoding/json" - "io" - "io/ioutil" "net/http" - "strings" "time" - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/proto" "github.com/gorilla/mux" - "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -37,43 +28,23 @@ import ( type Router struct { mux.Router - GRPCClient *GRPCClient - logger *zap.Logger + logger *zap.Logger } -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if err != nil { - return nil, err - } - +func NewRouter(logger *zap.Logger) (*Router, error) { router := &Router{ - GRPCClient: grpcClient, - logger: logger, + logger: logger, } router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") router.Handle("/metrics", promhttp.Handler()).Methods("GET") return router, nil } func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - return nil } @@ -106,536 +77,3 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) } - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - doc, err := h.client.GetDocument(vars["id"]) - if err != nil { - switch err { - case errors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = index.MarshalDocument(doc) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start 
:= time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]*index.Document, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bulk := func(values []string) bool { - for _, value := range values { - if strings.ToLower(value) == "true" { - return true - } - } - return false - }(r.URL.Query()["bulk"]) - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - if bulk { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - var doc *index.Document - err = proto.Unmarshal(bodyBytes, doc) - //doc, err := indexutils.NewDocumentFromBytes(docBytes) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } - } else { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } else { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - doc := &index.Document{} - 
err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docId, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId == "" { - ids = append(ids, docId) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if docId == "" { - ids = append(ids, docId) - } - } - } else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": 
err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } -} - -type SearchHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/dispatcher/server.go b/dispatcher/server.go index 447c51a..529401e 100644 --- a/dispatcher/server.go +++ b/dispatcher/server.go @@ -22,6 +22,7 @@ import ( type Server struct { managerGrpcAddress string grpcAddress string + grpcGatewayAddress string httpAddress string logger *zap.Logger grpcLogger *zap.Logger @@ -29,14 +30,16 @@ type Server struct { grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } -func NewServer(managerGrpcAddress string, grpcAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, grpcAddress string, grpcGatewayAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ managerGrpcAddress: managerGrpcAddress, grpcAddress: grpcAddress, + grpcGatewayAddress: grpcGatewayAddress, httpAddress: httpAddress, logger: logger, grpcLogger: grpcLogger, @@ -61,8 +64,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.grpcGatewayAddress, s.grpcAddress, 
s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = NewRouter(s.grpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -95,6 +105,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server s.logger.Info("start HTTP server") go func() { @@ -109,11 +125,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 28ed3f4..dd727d7 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,13 +22,13 @@ import ( "testing" "time" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/testutils" ) @@ -42,6 +42,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress1 := "" managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId1 := "manager1" managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -53,8 +54,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - HttpAddress: managerHttpAddress1, + GrpcAddress: managerGrpcAddress1, + GrpcGatewayAddress: managerGrpcGatewayAddress1, + HttpAddress: managerHttpAddress1, }, } @@ -84,6 +86,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress2 := managerGrpcAddress1 managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId2 := "manager2" managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -95,8 +98,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - HttpAddress: managerHttpAddress2, + GrpcAddress: managerGrpcAddress2, + GrpcGatewayAddress: managerGrpcGatewayAddress2, + HttpAddress: managerHttpAddress2, }, } @@ -126,6 +130,7 @@ func TestServer_Start(t *testing.T) { managerPeerGrpcAddress3 := managerGrpcAddress1 managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + managerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) managerNodeId3 := "manager3" managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -137,8 +142,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - HttpAddress: 
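The dispatcher server now constructs and starts a `GRPCGateway` on its own address alongside the gRPC server and the (now metrics-only) HTTP router. `NewGRPCGateway` itself is not shown in this hunk; the sketch below is only an illustration of the usual grpc-gateway wiring such a component performs, assuming the generated `RegisterDistributeHandlerFromEndpoint` name and illustrative ports.

```go
// Illustrative grpc-gateway wiring only; the real dispatcher.NewGRPCGateway may differ.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	"github.com/mosuka/blast/protobuf/distribute"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The runtime mux translates REST/JSON requests into gRPC calls.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}

	// Register function name assumed from the Distribute service; endpoint is the gRPC address.
	if err := distribute.RegisterDistributeHandlerFromEndpoint(ctx, mux, ":5001", opts); err != nil {
		log.Fatal(err)
	}

	// Serve the gateway on its own address, next to the gRPC server and the metrics router.
	log.Fatal(http.ListenAndServe(":6001", mux))
}
```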
managerHttpAddress3, + GrpcAddress: managerGrpcAddress3, + GrpcGatewayAddress: managerGrpcGatewayAddress3, + HttpAddress: managerHttpAddress3, }, } @@ -175,7 +181,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - managerCluster1, err := managerClient1.ClusterInfo() + resClusterInfo, err := managerClient1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -186,8 +192,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - HttpAddress: managerHttpAddress1, + GrpcAddress: managerGrpcAddress1, + GrpcGatewayAddress: managerGrpcGatewayAddress1, + HttpAddress: managerHttpAddress1, }, }, managerNodeId2: { @@ -195,8 +202,9 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - HttpAddress: managerHttpAddress2, + GrpcAddress: managerGrpcAddress2, + GrpcGatewayAddress: managerGrpcGatewayAddress2, + HttpAddress: managerHttpAddress2, }, }, managerNodeId3: { @@ -204,13 +212,14 @@ func TestServer_Start(t *testing.T) { BindAddress: managerBindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - HttpAddress: managerHttpAddress3, + GrpcAddress: managerGrpcAddress3, + GrpcGatewayAddress: managerGrpcGatewayAddress3, + HttpAddress: managerHttpAddress3, }, }, }, } - actManagerCluster1 := managerCluster1 + actManagerCluster1 := resClusterInfo.Cluster if !reflect.DeepEqual(expManagerCluster1, actManagerCluster1) { t.Fatalf("expected content to see %v, saw %v", expManagerCluster1, actManagerCluster1) } @@ -219,9 +228,10 @@ func TestServer_Start(t *testing.T) { // indexer cluster1 // indexerManagerGrpcAddress1 := managerGrpcAddress1 - indexerShardId1 := "shard-1" + indexerShardId1 := "shard1" indexerPeerGrpcAddress1 := "" indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId1 := "indexer1" indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -236,8 +246,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - HttpAddress: indexerHttpAddress1, + GrpcAddress: indexerGrpcAddress1, + GrpcGatewayAddress: indexerGrpcGatewayAddress1, + HttpAddress: indexerHttpAddress1, }, } indexerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -259,9 +270,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress2 := managerGrpcAddress1 - indexerShardId2 := "shard-1" + indexerShardId2 := "shard1" indexerPeerGrpcAddress2 := "" indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId2 := "indexer2" indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -276,8 +288,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - HttpAddress: indexerHttpAddress2, + GrpcAddress: indexerGrpcAddress2, + GrpcGatewayAddress: 
indexerGrpcGatewayAddress2, + HttpAddress: indexerHttpAddress2, }, } indexerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -299,9 +312,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress3 := managerGrpcAddress1 - indexerShardId3 := "shard-1" + indexerShardId3 := "shard1" indexerPeerGrpcAddress3 := "" indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId3 := "indexer3" indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -316,8 +330,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - HttpAddress: indexerHttpAddress3, + GrpcAddress: indexerGrpcAddress3, + GrpcGatewayAddress: indexerGrpcGatewayAddress3, + HttpAddress: indexerHttpAddress3, }, } indexerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -347,7 +362,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster1, err := indexerClient1.ClusterInfo() + resClusterInfoIndexer1, err := indexerClient1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -358,8 +373,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - HttpAddress: indexerHttpAddress1, + GrpcAddress: indexerGrpcAddress1, + GrpcGatewayAddress: indexerGrpcGatewayAddress1, + HttpAddress: indexerHttpAddress1, }, }, indexerNodeId2: { @@ -367,8 +383,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - HttpAddress: indexerHttpAddress2, + GrpcAddress: indexerGrpcAddress2, + GrpcGatewayAddress: indexerGrpcGatewayAddress2, + HttpAddress: indexerHttpAddress2, }, }, indexerNodeId3: { @@ -376,14 +393,15 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - HttpAddress: indexerHttpAddress3, + GrpcAddress: indexerGrpcAddress3, + GrpcGatewayAddress: indexerGrpcGatewayAddress3, + HttpAddress: indexerHttpAddress3, }, }, }, } - actIndexerCluster1 := indexerCluster1 - if !reflect.DeepEqual(expIndexerCluster1, actIndexerCluster1) { + actIndexerCluster1 := resClusterInfoIndexer1.Cluster + if !cmp.Equal(expIndexerCluster1, actIndexerCluster1) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) } @@ -391,9 +409,10 @@ func TestServer_Start(t *testing.T) { // indexer cluster2 // indexerManagerGrpcAddress4 := managerGrpcAddress1 - indexerShardId4 := "shard-2" + indexerShardId4 := "shard2" indexerPeerGrpcAddress4 := "" indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId4 := "indexer4" indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -408,8 +427,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress4, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - 
HttpAddress: indexerHttpAddress4, + GrpcAddress: indexerGrpcAddress4, + GrpcGatewayAddress: indexerGrpcGatewayAddress4, + HttpAddress: indexerHttpAddress4, }, } indexerIndexMapping4, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -431,9 +451,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress5 := managerGrpcAddress1 - indexerShardId5 := "shard-2" + indexerShardId5 := "shard2" indexerPeerGrpcAddress5 := "" indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId5 := "indexer5" indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -448,8 +469,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress5, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - HttpAddress: indexerHttpAddress5, + GrpcAddress: indexerGrpcAddress5, + GrpcGatewayAddress: indexerGrpcGatewayAddress5, + HttpAddress: indexerHttpAddress5, }, } indexerIndexMapping5, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -471,9 +493,10 @@ func TestServer_Start(t *testing.T) { time.Sleep(5 * time.Second) indexerManagerGrpcAddress6 := managerGrpcAddress1 - indexerShardId6 := "shard-2" + indexerShardId6 := "shard2" indexerPeerGrpcAddress6 := "" indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerGrpcGatewayAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) indexerNodeId6 := "indexer6" indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -488,8 +511,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress6, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - HttpAddress: indexerHttpAddress6, + GrpcAddress: indexerGrpcAddress6, + GrpcGatewayAddress: indexerGrpcGatewayAddress6, + HttpAddress: indexerHttpAddress6, }, } indexerIndexMapping6, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) @@ -519,7 +543,7 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster2, err := indexerClient2.ClusterInfo() + indexerCluster2, err := indexerClient2.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -530,8 +554,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress4, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - HttpAddress: indexerHttpAddress4, + GrpcAddress: indexerGrpcAddress4, + GrpcGatewayAddress: indexerGrpcGatewayAddress4, + HttpAddress: indexerHttpAddress4, }, }, indexerNodeId5: { @@ -539,8 +564,9 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress5, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - HttpAddress: indexerHttpAddress5, + GrpcAddress: indexerGrpcAddress5, + GrpcGatewayAddress: indexerGrpcGatewayAddress5, + HttpAddress: indexerHttpAddress5, }, }, indexerNodeId6: { @@ -548,13 +574,14 @@ func TestServer_Start(t *testing.T) { BindAddress: indexerBindAddress6, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - HttpAddress: indexerHttpAddress6, + GrpcAddress: indexerGrpcAddress6, + GrpcGatewayAddress: 
indexerGrpcGatewayAddress6, + HttpAddress: indexerHttpAddress6, }, }, }, } - actIndexerCluster2 := indexerCluster2 + actIndexerCluster2 := indexerCluster2.Cluster if !reflect.DeepEqual(expIndexerCluster2, actIndexerCluster2) { t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } @@ -564,9 +591,10 @@ func TestServer_Start(t *testing.T) { // dispatcherManagerGrpcAddress := managerGrpcAddress1 dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dispatcherGrpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) + dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherGrpcGatewayAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) defer func() { dispatcher1.Stop() }() diff --git a/example/geo_search_request.json b/example/geo_search_request.json index 40baa91..2883245 100644 --- a/example/geo_search_request.json +++ b/example/geo_search_request.json @@ -1,26 +1,28 @@ { - "query": { - "location": { - "lon": -122.107799, - "lat": 37.399285 - }, - "distance": "100mi", - "field": "geo" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - { - "by": "geo_distance", - "field": "geo", - "unit": "mi", + "search_request": { + "query": { "location": { "lon": -122.107799, "lat": 37.399285 + }, + "distance": "100mi", + "field": "geo" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + { + "by": "geo_distance", + "field": "geo", + "unit": "mi", + "location": { + "lon": -122.107799, + "lat": 37.399285 + } } - } - ] + ] + } } diff --git a/example/wiki_bulk_delete.txt b/example/wiki_bulk_delete.txt index 6f7ddd9..8928994 100644 --- a/example/wiki_bulk_delete.txt +++ b/example/wiki_bulk_delete.txt @@ -1,4 +1,36 @@ arwiki_1 bgwiki_1 cawiki_1 +cswiki_1 +dawiki_1 +dewiki_1 +elwiki_1 +enwiki_1 +eswiki_1 +fawiki_1 +fiwiki_1 +frwiki_1 +gawiki_1 +glwiki_1 +guwiki_1 +hiwiki_1 +huwiki_1 +hywiki_1 +idwiki_1 +itwiki_1 +jawiki_1 +knwiki_1 +kowiki_1 +mlwiki_1 +nlwiki_1 +nowiki_1 +pswiki_1 +ptwiki_1 +rowiki_1 +ruwiki_1 +svwiki_1 +tawiki_1 +tewiki_1 +thwiki_1 +trwiki_1 zhwiki_1 diff --git a/example/wiki_search_request.json b/example/wiki_search_request.json index c189f9f..3566d99 100644 --- a/example/wiki_search_request.json +++ b/example/wiki_search_request.json @@ -1,44 +1,46 @@ { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "facets": { - "Type count": { - "size": 10, - "field": "_type" + "search_request": { + "query": { + "query": "+_all:search" }, - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z", - "end": "2010-12-31T23:59:59Z" - }, - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z" - } + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score", + "_id", + "-timestamp" + ], + "facets": { + "Type count": { + "size": 10, + "field": "_type" + }, + "Timestamp range": { + "size": 10, + "field": "timestamp", + "date_ranges": [ + { + "name": "2001 - 2010", + "start": "2001-01-01T00:00:00Z", + "end": "2010-12-31T23:59:59Z" + }, + { + 
"name": "2011 - 2020", + "start": "2011-01-01T00:00:00Z", + "end": "2020-12-31T23:59:59Z" + } + ] + } + }, + "highlight": { + "style": "html", + "fields": [ + "title", + "text" ] } - }, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] } } diff --git a/example/wiki_search_request_prefix.json b/example/wiki_search_request_prefix.json index adb5f92..0de0b37 100644 --- a/example/wiki_search_request_prefix.json +++ b/example/wiki_search_request_prefix.json @@ -1,14 +1,16 @@ { - "query": { + "search_request": { + "query": { "prefix": "searc", "field": "title_en" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score" - ] + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } } diff --git a/example/wiki_search_request_simple.json b/example/wiki_search_request_simple.json index e4cac4d..39a3e93 100644 --- a/example/wiki_search_request_simple.json +++ b/example/wiki_search_request_simple.json @@ -1,13 +1,15 @@ { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "fields": [ - "*" - ], - "sort": [ - "-_score" - ] + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] + } } diff --git a/go.mod b/go.mod index 9edf824..99b2de5 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/mosuka/blast go 1.12 require ( + cloud.google.com/go v0.43.0 // indirect github.com/blevesearch/bleve v0.7.0 github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect @@ -17,17 +18,20 @@ require ( github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.1.1 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 + github.com/golang/protobuf v1.3.2 + github.com/google/go-cmp v0.3.1 + github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect github.com/gorilla/mux v1.7.0 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/golang-lru v0.5.1 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.9.5 + github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 github.com/ikawaha/kagome.ipadic v1.0.1 // indirect github.com/imdario/mergo v0.3.7 github.com/jmhodges/levigo v1.0.0 // indirect + github.com/kr/pty v1.1.8 // indirect github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 github.com/mosuka/bbadger v0.0.0-20190319122948-67a91aedfe68 @@ -37,6 +41,7 @@ require ( github.com/prometheus/common v0.2.0 // indirect github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 // indirect github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect + github.com/rogpeppe/fastuuid v1.2.0 // indirect github.com/stretchr/objx v0.1.1 github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tebeka/snowball v0.0.0-20130405174319-16e884df4e19 // indirect @@ -45,9 +50,14 @@ require ( go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.10.0 - golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 // indirect - google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d // indirect - 
google.golang.org/grpc v1.19.1 + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect + golang.org/x/mobile v0.0.0-20190806162312-597adff16ade // indirect + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect + golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect + golang.org/x/tools v0.0.0-20190808195139-e713427fea3f // indirect + google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 + google.golang.org/grpc v1.22.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.2.2 + honnef.co/go/tools v0.0.1-2019.2.2 // indirect ) diff --git a/go.sum b/go.sum index 91ed1f7..c57d73c 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= @@ -40,6 +44,7 @@ github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpm github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= @@ -64,6 +69,8 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= github.com/glycerine/go-unsnap-stream 
v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= @@ -76,14 +83,29 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= @@ -92,6 +114,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmo github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -105,8 +129,9 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y= github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= @@ -121,12 +146,20 @@ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69 h1:/ylv98AIMI8XzkeqJGmJSTc/zRQrNllmYWW5b2MoyD4= github.com/markthethomas/raft-badger v0.0.0-20190420151455-b37d14e77a69/go.mod h1:H6ZQv8h8j98nwnF25XLGalSOLhFRjFQ2GGNZRNkkw8Y= github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= @@ -170,6 +203,9 @@ 
github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573 h1:gAuD3LIrjkoOO github.com/prometheus/procfs v0.0.0-20190322151404-55ae3d9d5573/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ= github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -197,6 +233,8 @@ github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -206,53 +244,126 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 h1:XCbwcsP09zrBt1aYht0fASw+ynbEpYr8NnCkIN9nMM0= -golang.org/x/net v0.0.0-20190327214358-63eda1eb0650/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d h1:XB2jc5XQ9uhizGTS2vWcN01bc4dI6z3C4KY5MQm8SS8= -google.golang.org/genproto v0.0.0-20190327125643-d831d65fe17d/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= 
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index 0d9fb3d..38ace62 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -16,18 +16,11 @@ package indexer import ( "context" - "errors" "math" - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) type GRPCClient struct { @@ -96,246 +89,62 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &index.NodeHealthCheckRequest{} - - switch probe { - case index.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = index.NodeHealthCheckRequest_HEALTHINESS - case index.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = index.NodeHealthCheckRequest_LIVENESS - case index.NodeHealthCheckRequest_READINESS.String(): - req.Probe = index.NodeHealthCheckRequest_READINESS - default: - req.Probe = index.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return index.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *index.NodeHealthCheckRequest, opts ...grpc.CallOption) (*index.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*index.Node, error) { - resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Node, nil +func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.NodeInfoResponse, error) { + return c.client.NodeInfo(c.ctx, req, opts...) 
} -func (c *GRPCClient) ClusterJoin(node *index.Node, opts ...grpc.CallOption) error { - req := &index.ClusterJoinRequest{ - Node: node, - } - - _, err := c.client.ClusterJoin(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterJoin(req *index.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterJoin(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { - req := &index.ClusterLeaveRequest{ - Id: id, - } - - _, err := c.client.ClusterLeave(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterLeave(req *index.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterLeave(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*index.Cluster, error) { - resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Cluster, nil +func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.ClusterInfoResponse, error) { + return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) } -func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { - req := &empty.Empty{} - - watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { + return c.client.ClusterWatch(c.ctx, req, opts...) } -func (c *GRPCClient) GetDocument(id string, opts ...grpc.CallOption) (*index.Document, error) { - req := &index.GetDocumentRequest{ - Id: id, - } - - resp, err := c.client.GetDocument(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return nil, blasterrors.ErrNotFound - default: - return nil, errors.New(st.Message()) - } - } - - return resp.Document, nil +func (c *GRPCClient) Get(req *index.GetRequest, opts ...grpc.CallOption) (*index.GetResponse, error) { + return c.client.Get(c.ctx, req, opts...) } -func (c *GRPCClient) Search(searchRequest *bleve.SearchRequest, opts ...grpc.CallOption) (*bleve.SearchResult, error) { - // bleve.SearchRequest -> Any - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return nil, err - } - - req := &index.SearchRequest{ - SearchRequest: searchRequestAny, - } - - resp, err := c.client.Search(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - // Any -> bleve.SearchResult - searchResultInstance, err := protobuf.MarshalAny(resp.SearchResult) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - if searchResultInstance == nil { - return nil, errors.New("nil") - } - searchResult := searchResultInstance.(*bleve.SearchResult) - - return searchResult, nil +func (c *GRPCClient) Index(req *index.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Index(c.ctx, req, opts...) } -func (c *GRPCClient) IndexDocument(docs []*index.Document, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.IndexDocument(c.ctx, opts...) 
- if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, doc := range docs { - req := &index.IndexDocumentRequest{ - Document: doc, - //Id: id, - //Fields: fieldsAny, - } - - err = stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) Delete(req *index.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Delete(c.ctx, req, opts...) } -func (c *GRPCClient) DeleteDocument(ids []string, opts ...grpc.CallOption) (int, error) { - stream, err := c.client.DeleteDocument(c.ctx, opts...) - if err != nil { - st, _ := status.FromError(err) - - return -1, errors.New(st.Message()) - } - - for _, id := range ids { - req := &index.DeleteDocumentRequest{ - Id: id, - } - - err := stream.Send(req) - if err != nil { - return -1, err - } - } - - resp, err := stream.CloseAndRecv() - if err != nil { - return -1, err - } - - return int(resp.Count), nil +func (c *GRPCClient) BulkIndex(req *index.BulkIndexRequest, opts ...grpc.CallOption) (*index.BulkIndexResponse, error) { + return c.client.BulkIndex(c.ctx, req, opts...) } -func (c *GRPCClient) GetIndexConfig(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexMapping, err := protobuf.MarshalAny(resp.IndexConfig.IndexMapping) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexConfig := map[string]interface{}{ - "index_mapping": indexMapping, - "index_type": resp.IndexConfig.IndexType, - "index_storage_type": resp.IndexConfig.IndexStorageType, - } - - return indexConfig, nil +func (c *GRPCClient) BulkDelete(req *index.BulkDeleteRequest, opts ...grpc.CallOption) (*index.BulkDeleteResponse, error) { + return c.client.BulkDelete(c.ctx, req, opts...) } -func (c *GRPCClient) GetIndexStats(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - indexStatsIntr, err := protobuf.MarshalAny(resp.IndexStats) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - indexStats := *indexStatsIntr.(*map[string]interface{}) - - return indexStats, nil +func (c *GRPCClient) Search(req *index.SearchRequest, opts ...grpc.CallOption) (*index.SearchResponse, error) { + return c.client.Search(c.ctx, req, opts...) } -func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) GetIndexConfig(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexConfigResponse, error) { + return c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) +} - return errors.New(st.Message()) - } +func (c *GRPCClient) GetIndexStats(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexStatsResponse, error) { + return c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) 
+} - return nil +func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Snapshot(c.ctx, &empty.Empty{}) } diff --git a/indexer/grpc_gateway.go b/indexer/grpc_gateway.go new file mode 100644 index 0000000..3a1fafa --- /dev/null +++ b/indexer/grpc_gateway.go @@ -0,0 +1,376 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indexer + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *index.GetResponse: + value, err := protobuf.MarshalAny(v.(*index.GetResponse).Fields) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "fields": value, + }, + ) + case *index.SearchResponse: + value, err := protobuf.MarshalAny(v.(*index.SearchResponse).SearchResult) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "search_result": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + switch v.(type) { + case *index.SearchRequest: + m := map[string]interface{}{} + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + searchRequestMap, ok := m["search_request"] + if !ok { + return errors.New("search_request does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + searchRequest := bleve.NewSearchRequest(nil) + err = json.Unmarshal(searchRequestBytes, searchRequest) + if err != nil { + return err + } + v.(*index.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) + default: + return json.Unmarshal(data, v) + } +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". 
+func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.IndexRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + id, ok := tmpValue["id"].(string) + if ok { + v.(*index.IndexRequest).Id = id + } + + fields, ok := tmpValue["fields"] + if !ok { + return errors.New("value does not exist") + } + v.(*index.IndexRequest).Fields = &any.Any{} + return protobuf.UnmarshalAny(fields, v.(*index.IndexRequest).Fields) + case *index.SearchRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + searchRequestMap, ok := tmpValue["search_request"] + if !ok { + return errors.New("value does not exist") + } + searchRequestBytes, err := json.Marshal(searchRequestMap) + if err != nil { + return err + } + var searchRequest *bleve.SearchRequest + err = json.Unmarshal(searchRequestBytes, &searchRequest) + if err != nil { + return err + } + v.(*index.SearchRequest).SearchRequest = &any.Any{} + return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type JsonlMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonlMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". +func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.BulkIndexRequest: + docs := make([]*index.Document, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + break + } + } + + if len(docBytes) > 0 { + doc := &index.Document{} + err = index.UnmarshalDocument(docBytes, doc) + if err != nil { + return err + } + docs = append(docs, doc) + } + } + v.(*index.BulkIndexRequest).Documents = docs + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JsonlMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type TextMarshaler struct{} + +// ContentType always Returns "application/json". 
+func (*TextMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads text stream from "r". +func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *index.BulkDeleteRequest: + ids := make([]string, 0) + reader := bufio.NewReader(bytes.NewReader(buffer)) + for { + //idBytes, err := reader.ReadBytes('\n') + idBytes, _, err := reader.ReadLine() + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + break + } + } + + if len(idBytes) > 0 { + ids = append(ids, string(idBytes)) + } + } + v.(*index.BulkDeleteRequest).Ids = ids + return nil + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *TextMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), + runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := index.RegisterIndexHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 8f6598e..63b8d78 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -157,13 +157,14 @@ func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster return nil, err } - managers, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { s.logger.Error(err.Error()) return nil, err } - return managers, nil + return res.Cluster, nil } func (s *GRPCService) 
cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { @@ -203,7 +204,8 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { } // create stream for watching cluster changes - stream, err := client.ClusterWatch() + req := &empty.Empty{} + stream, err := client.ClusterWatch(req) if err != nil { s.logger.Error(err.Error()) continue @@ -509,8 +511,17 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - s.logger.Info("update shards", zap.Any("shards", snapshotClusterMap)) - err = client.Set(fmt.Sprintf("cluster/shards/%s", s.shardId), snapshotClusterMap) + valueAny := &any.Any{} + err = protobuf.UnmarshalAny(snapshotClusterMap, valueAny) + if err != nil { + s.logger.Error(err.Error()) + continue + } + req := &management.SetRequest{ + Key: fmt.Sprintf("cluster/shards/%s", s.shardId), + Value: valueAny, + } + _, err = client.Set(req) if err != nil { s.logger.Error(err.Error()) continue @@ -548,12 +559,18 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *index.NodeHealth resp := &index.NodeHealthCheckResponse{} switch req.Probe { + case index.NodeHealthCheckRequest_UNKNOWN: + fallthrough case index.NodeHealthCheckRequest_HEALTHINESS: resp.State = index.NodeHealthCheckResponse_HEALTHY case index.NodeHealthCheckRequest_LIVENESS: resp.State = index.NodeHealthCheckResponse_ALIVE case index.NodeHealthCheckRequest_READINESS: resp.State = index.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil @@ -589,7 +606,8 @@ func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { return nil, err } - node, err := s.peerClients[id].NodeInfo() + req := &empty.Empty{} + resp, err := s.peerClients[id].NodeInfo(req) if err != nil { s.logger.Debug(err.Error(), zap.String("id", id)) return &index.Node{ @@ -602,7 +620,7 @@ func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { }, nil } - return node, nil + return resp.Node, nil } func (s *GRPCService) getNode(id string) (*index.Node, error) { @@ -641,7 +659,12 @@ func (s *GRPCService) setNode(node *index.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(node) + + req := &index.ClusterJoinRequest{ + Node: node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Error(err.Error()) return err @@ -677,7 +700,12 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.ClusterLeave(id) + + req := &index.ClusterLeaveRequest{ + Id: id, + } + + _, err = client.ClusterLeave(req) if err != nil { s.logger.Error(err.Error()) return err @@ -758,10 +786,10 @@ func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterW return nil } -func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentRequest) (*index.GetDocumentResponse, error) { - resp := &index.GetDocumentResponse{} +func (s *GRPCService) Get(ctx context.Context, req *index.GetRequest) (*index.GetResponse, error) { + resp := &index.GetResponse{} - fields, err := s.raftServer.GetDocument(req.Id) + fields, err := s.raftServer.Get(req.Id) if err != nil { switch err { case blasterrors.ErrNotFound: @@ -773,148 +801,155 @@ func (s *GRPCService) GetDocument(ctx context.Context, req *index.GetDocumentReq } } - docMap := map[string]interface{}{ - "id": req.Id, - "fields": fields, - } - - docBytes, err := 
json.Marshal(docMap) + fieldsAny := &any.Any{} + err = protobuf.UnmarshalAny(fields, fieldsAny) if err != nil { s.logger.Error(err.Error(), zap.String("id", req.Id)) return resp, status.Error(codes.Internal, err.Error()) } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Document = doc + resp.Fields = fieldsAny return resp, nil } -func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { - resp := &index.SearchResponse{} - - searchRequest, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } +func (s *GRPCService) Index(ctx context.Context, req *index.IndexRequest) (*empty.Empty, error) { + resp := &empty.Empty{} - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) + // index + var err error + if s.raftServer.IsLeader() { + err = s.raftServer.Index(&index.Document{Id: req.Id, Fields: req.Fields}) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + } else { + // forward to leader + client, err := s.getLeaderClient() + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + resp, err = client.Index(req) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } } - resp.SearchResult = searchResultAny - return resp, nil } -func (s *GRPCService) IndexDocument(stream index.Index_IndexDocumentServer) error { - docs := make([]*index.Document, 0) - - for { - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - - docs = append(docs, req.Document) - } +func (s *GRPCService) Delete(ctx context.Context, req *index.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} - // index - count := -1 + // delete var err error if s.raftServer.IsLeader() { - count, err = s.raftServer.IndexDocument(docs) + err = s.raftServer.Delete(req.Id) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } } else { // forward to leader client, err := s.getLeaderClient() if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - count, err = client.IndexDocument(docs) + resp, err = client.Delete(req) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } } - return stream.SendAndClose( - &index.IndexDocumentResponse{ - Count: int32(count), - }, - ) + return resp, nil } -func (s *GRPCService) DeleteDocument(stream index.Index_DeleteDocumentServer) error { - ids := make([]string, 0) +func (s *GRPCService) BulkIndex(ctx context.Context, req *index.BulkIndexRequest) 
(*index.BulkIndexResponse, error) { + resp := &index.BulkIndexResponse{} - for { - req, err := stream.Recv() + if s.raftServer.IsLeader() { + count, err := s.raftServer.BulkIndex(req.Documents) if err != nil { - if err == io.EOF { - s.logger.Debug(err.Error()) - break - } s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + resp.Count = -1 + return resp, status.Error(codes.Internal, err.Error()) + } + resp.Count = int32(count) + } else { + // forward to leader + client, err := s.getLeaderClient() + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + resp, err = client.BulkIndex(req) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - - ids = append(ids, req.Id) } - // delete - count := -1 - var err error + return resp, nil +} + +func (s *GRPCService) BulkDelete(ctx context.Context, req *index.BulkDeleteRequest) (*index.BulkDeleteResponse, error) { + resp := &index.BulkDeleteResponse{} + if s.raftServer.IsLeader() { - count, err = s.raftServer.DeleteDocument(ids) + count, err := s.raftServer.BulkDelete(req.Ids) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + resp.Count = -1 + return resp, status.Error(codes.Internal, err.Error()) } + resp.Count = int32(count) } else { // forward to leader client, err := s.getLeaderClient() if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } - count, err = client.DeleteDocument(ids) + resp, err := client.BulkDelete(req) if err != nil { s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) + return resp, status.Error(codes.Internal, err.Error()) } } - return stream.SendAndClose( - &index.DeleteDocumentResponse{ - Count: int32(count), - }, - ) + return resp, nil +} + +func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { + resp := &index.SearchResponse{} + + searchRequest, err := protobuf.MarshalAny(req.SearchRequest) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) + } + + searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResultAny := &any.Any{} + err = protobuf.UnmarshalAny(searchResult, searchResultAny) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.SearchResult = searchResultAny + + return resp, nil } func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { diff --git a/indexer/http_handler.go b/indexer/http_handler.go index 6f7688a..6a7353f 100644 --- a/indexer/http_handler.go +++ b/indexer/http_handler.go @@ -15,19 +15,11 @@ package indexer import ( - "bufio" - "encoding/json" - "io" - "io/ioutil" "net/http" - "strings" "time" - "github.com/blevesearch/bleve" "github.com/gorilla/mux" - blasterrors "github.com/mosuka/blast/errors" blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/version" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" @@ -36,43 +28,23 @@ import ( type Router struct { mux.Router - GRPCClient *GRPCClient - logger *zap.Logger + logger *zap.Logger } -func 
NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if err != nil { - return nil, err - } - +func NewRouter(logger *zap.Logger) (*Router, error) { router := &Router{ - GRPCClient: grpcClient, - logger: logger, + logger: logger, } router.StrictSlash(true) router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/documents", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/documents/{id}", NewGetDocumentHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/documents/{id}", NewSetDocumentHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/documents/{id}", NewDeleteDocumentHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/search", NewSearchHandler(router.GRPCClient, logger)).Methods("POST") router.Handle("/metrics", promhttp.Handler()).Methods("GET") return router, nil } func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - return nil } @@ -105,539 +77,3 @@ func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { blasthttp.WriteResponse(w, content, status, h.logger) } - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetDocumentHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - id := vars["id"] - - doc, err := h.client.GetDocument(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = index.MarshalDocument(doc) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type IndexHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSetDocumentHandler(client *GRPCClient, logger *zap.Logger) *IndexHandler { - return &IndexHandler{ - client: client, - logger: logger, - } -} - -func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - docs := make([]*index.Document, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bulk := func(values []string) bool { - for _, value := range values { - if strings.ToLower(value) == "true" { - return true - } - } - return false - }(r.URL.Query()["bulk"]) - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := 
map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - if bulk { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } - } else { - doc := &index.Document{} - err = index.UnmarshalDocument(bodyBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - } else { - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(bodyBytes), &fieldsMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docMap := map[string]interface{}{ - "id": id, - "fields": fieldsMap, - } - docBytes, err := json.Marshal(docMap) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - docs = append(docs, doc) - } - - // index documents in bulk - count, err := h.client.IndexDocument(docs) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - 
"message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteDocumentHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - // create documents - ids := make([]string, 0) - - vars := mux.Vars(r) - id := vars["id"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if id == "" { - s := strings.NewReader(string(bodyBytes)) - reader := bufio.NewReader(s) - for { - docId, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if docId != "" { - ids = append(ids, docId) - } - break - } - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - if docId != "" { - ids = append(ids, docId) - } - } - } else { - // Deleting a document - ids = append(ids, id) - } - - // delete documents in bulk - count, err := h.client.DeleteDocument(ids) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // create JSON content - msgMap := map[string]interface{}{ - "count": count, - } - content, err = json.MarshalIndent(msgMap, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type SearchHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewSearchHandler(client *GRPCClient, logger *zap.Logger) *SearchHandler { - return &SearchHandler{ - client: client, - logger: logger, - } -} - -func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r 
*http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - searchRequestBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // []byte -> bleve.SearchRequest - searchRequest := bleve.NewSearchRequest(nil) - if len(searchRequestBytes) > 0 { - err := json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - } - - searchResult, err := h.client.Search(searchRequest) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - content, err = json.MarshalIndent(&searchResult, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/indexer/raft_command.go b/indexer/raft_command.go deleted file mode 100644 index 3cab8f0..0000000 --- a/indexer/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - indexDocument - deleteDocument -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 95590fc..da53222 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -128,7 +128,7 @@ func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (f *RaftFSM) IndexDocument(doc *index.Document) error { +func (f *RaftFSM) Index(doc *index.Document) error { err := f.index.Index(doc) if err != nil { f.logger.Error(err.Error()) @@ -138,7 +138,7 @@ func (f *RaftFSM) IndexDocument(doc *index.Document) error { return nil } -func (f *RaftFSM) IndexDocuments(docs []*index.Document) (int, error) { +func (f *RaftFSM) BulkIndex(docs []*index.Document) (int, error) { count, err := f.index.BulkIndex(docs) if err != nil { f.logger.Error(err.Error()) @@ -148,7 +148,7 @@ func (f *RaftFSM) IndexDocuments(docs []*index.Document) (int, error) { return count, nil } -func (f *RaftFSM) DeleteDocument(id string) error { +func (f *RaftFSM) Delete(id string) error { err := f.index.Delete(id) if err != nil { f.logger.Error(err.Error()) @@ -158,7 +158,7 @@ func (f *RaftFSM) DeleteDocument(id string) error { return nil } -func (f *RaftFSM) DeleteDocuments(ids []string) (int, error) { +func (f *RaftFSM) BulkDelete(ids []string) (int, error) { count, err := f.index.BulkDelete(ids) if err != nil { f.logger.Error(err.Error()) @@ -190,75 +190,67 @@ type fsmResponse struct { error error } -type fsmIndexDocumentResponse struct { +type fsmBulkIndexResponse struct { count int error error } -type fsmDeleteDocumentResponse struct { +type fsmBulkDeleteResponse struct { count int error error } func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) + proposal := &index.Proposal{} + err := proto.Unmarshal(l.Data, proposal) if err != nil { + f.logger.Error(err.Error()) return err } - switch msg.Command { - case setNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + switch proposal.Event { + case index.Proposal_SET_NODE: + err = f.SetNode(proposal.Node) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - b, err := json.Marshal(data["node"]) + return &fsmResponse{error: nil} + case index.Proposal_DELETE_NODE: + err = f.DeleteNode(proposal.Node.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - var node *index.Node - err = json.Unmarshal(b, &node) + return &fsmResponse{error: nil} + case index.Proposal_INDEX: + err := f.Index(proposal.Document) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNode(node) + return &fsmResponse{error: nil} + case index.Proposal_DELETE: + err := f.Delete(proposal.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteNode(data["id"].(string)) - return &fsmResponse{error: err} - case 
indexDocument: - var data []*index.Document - err := json.Unmarshal(msg.Data, &data) + return &fsmResponse{error: nil} + case index.Proposal_BULK_INDEX: + count, err := f.BulkIndex(proposal.Documents) if err != nil { f.logger.Error(err.Error()) - return &fsmIndexDocumentResponse{count: -1, error: err} + return &fsmBulkIndexResponse{count: count, error: err} } - count, err := f.IndexDocuments(data) - return &fsmIndexDocumentResponse{count: count, error: err} - case deleteDocument: - var data []string - err := json.Unmarshal(msg.Data, &data) + return &fsmBulkIndexResponse{count: count, error: nil} + case index.Proposal_BULK_DELETE: + count, err := f.BulkDelete(proposal.Ids) if err != nil { f.logger.Error(err.Error()) - return &fsmDeleteDocumentResponse{count: -1, error: err} + return &fsmBulkDeleteResponse{count: count, error: err} } - count, err := f.DeleteDocuments(data) - return &fsmDeleteDocumentResponse{count: count, error: err} + return &fsmBulkDeleteResponse{count: count, error: nil} default: err = errors.New("unsupported command") f.logger.Error(err.Error()) diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 0903e67..39ea9a8 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -15,7 +15,6 @@ package indexer import ( - "encoding/json" "errors" "io/ioutil" "net" @@ -24,6 +23,9 @@ import ( "time" "github.com/blevesearch/bleve" + + "github.com/golang/protobuf/proto" + "github.com/blevesearch/bleve/mapping" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" @@ -45,8 +47,9 @@ type RaftServer struct { bootstrap bool logger *zap.Logger - raft *raft.Raft - fsm *RaftFSM + transport *raft.NetworkTransport + raft *raft.Raft + fsm *RaftFSM } func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { @@ -97,7 +100,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) + s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err @@ -185,7 +188,7 @@ func (s *RaftServer) Start() error { } s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, transport) + s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) if err != nil { s.logger.Fatal(err.Error()) return err @@ -197,7 +200,7 @@ func (s *RaftServer) Start() error { Servers: []raft.Server{ { ID: raftConfig.LocalID, - Address: transport.LocalAddr(), + Address: s.transport.LocalAddr(), }, }, } @@ -287,6 +290,10 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", blasterrors.ErrNotFoundLeader } +func (s *RaftServer) NodeAddress() string { + return string(s.transport.LocalAddr()) +} + func (s *RaftServer) NodeID() string { return s.node.Id } @@ -324,24 +331,17 @@ func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { } func (s *RaftServer) setNode(node *index.Node) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node": node, - }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err + proposal := &index.Proposal{ + Event: index.Proposal_SET_NODE, + Node: 
node, } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.Any("node", node)) @@ -357,24 +357,19 @@ func (s *RaftServer) setNode(node *index.Node) error { } func (s *RaftServer) deleteNode(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "id": nodeId, + proposal := &index.Proposal{ + Event: index.Proposal_DELETE_NODE, + Node: &index.Node{ + Id: nodeId, }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.String("id", nodeId)) @@ -526,7 +521,7 @@ func (s *RaftServer) Snapshot() error { return nil } -func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { +func (s *RaftServer) Get(id string) (map[string]interface{}, error) { fields, err := s.fsm.GetDocument(id) if err != nil { switch err { @@ -541,86 +536,138 @@ func (s *RaftServer) GetDocument(id string) (map[string]interface{}, error) { return fields, nil } -func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := s.fsm.Search(request) +func (s *RaftServer) Index(doc *index.Document) error { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + return raft.ErrNotLeader + } + + proposal := &index.Proposal{ + Event: index.Proposal_INDEX, + Document: doc, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) - return nil, err + return err } - return result, nil + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() + if err != nil { + s.logger.Error(err.Error()) + return err + } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } + + return nil } -func (s *RaftServer) IndexDocument(docs []*index.Document) (int, error) { +func (s *RaftServer) Delete(id string) error { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader + return raft.ErrNotLeader } - msg, err := newMessage( - indexDocument, - docs, - ) + proposal := &index.Proposal{ + Event: index.Proposal_DELETE, + Id: id, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) - return -1, err + return err + } + + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() + if err != nil { + s.logger.Error(err.Error()) + return err + } + err = f.Response().(*fsmResponse).error + if err != nil { + s.logger.Error(err.Error()) + return err + } + + return nil +} + +func (s *RaftServer) BulkIndex(docs []*index.Document) (int, error) { + if !s.IsLeader() { + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + return -1, raft.ErrNotLeader } - msgBytes, err := json.Marshal(msg) + proposal := &index.Proposal{ + Event: index.Proposal_BULK_INDEX, + Documents: 
docs, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return -1, err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) return -1, err } - err = f.Response().(*fsmIndexDocumentResponse).error + err = f.Response().(*fsmBulkIndexResponse).error if err != nil { s.logger.Error(err.Error()) return -1, err } - return f.Response().(*fsmIndexDocumentResponse).count, nil + return f.Response().(*fsmBulkIndexResponse).count, nil } -func (s *RaftServer) DeleteDocument(ids []string) (int, error) { +func (s *RaftServer) BulkDelete(ids []string) (int, error) { if !s.IsLeader() { s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return -1, raft.ErrNotLeader } - msg, err := newMessage( - deleteDocument, - ids, - ) + proposal := &index.Proposal{ + Event: index.Proposal_BULK_DELETE, + Ids: ids, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return -1, err } - msgBytes, err := json.Marshal(msg) + f := s.raft.Apply(proposalByte, 10*time.Second) + err = f.Error() if err != nil { s.logger.Error(err.Error()) return -1, err } - - f := s.raft.Apply(msgBytes, 10*time.Second) - err = f.Error() + err = f.Response().(*fsmBulkDeleteResponse).error if err != nil { s.logger.Error(err.Error()) return -1, err } - err = f.Response().(*fsmDeleteDocumentResponse).error + + return f.Response().(*fsmBulkDeleteResponse).count, nil +} + +func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { + result, err := s.fsm.Search(request) if err != nil { s.logger.Error(err.Error()) - return -1, err + return nil, err } - return f.Response().(*fsmDeleteDocumentResponse).count, nil + return result, nil } func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { diff --git a/indexer/server.go b/indexer/server.go index d6b8bc2..dbea38b 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -18,13 +18,16 @@ import ( "encoding/json" "fmt" + accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/indexutils" - "github.com/blevesearch/bleve/mapping" + "github.com/mosuka/blast/protobuf/management" - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/errors" + "github.com/blevesearch/bleve/mapping" + "github.com/golang/protobuf/ptypes/empty" + blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) @@ -46,6 +49,7 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } @@ -86,20 +90,29 @@ func (s *Server) Start() { return } - clusterIntr, err := mc.Get(fmt.Sprintf("cluster/shards/%s", s.shardId)) - if err != nil && err != errors.ErrNotFound { + req := &management.GetRequest{ + Key: fmt.Sprintf("cluster/shards/%s", s.shardId), + } + res, err := mc.Get(req) + if err != nil && err != blasterrors.ErrNotFound { + s.logger.Fatal(err.Error()) + return + } + value, err := protobuf.MarshalAny(res.Value) + if err != nil { s.logger.Fatal(err.Error()) return } - if clusterIntr != nil { - b, err := json.Marshal(clusterIntr) + if value != nil { + nodes := *value.(*map[string]interface{}) + nodesBytes, err := json.Marshal(nodes) if err != nil { s.logger.Fatal(err.Error()) return } var cluster *index.Cluster - err = 
json.Unmarshal(b, &cluster) + err = json.Unmarshal(nodesBytes, &cluster) if err != nil { s.logger.Fatal(err.Error()) return @@ -134,31 +147,38 @@ func (s *Server) Start() { return } s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - value, err := mc.Get("/index_config") + req := &management.GetRequest{ + Key: "/index_config", + } + resp, err := mc.Get(req) if err != nil { s.logger.Fatal(err.Error()) return } - indexMappingSrc, ok := (*value.(*map[string]interface{}))["index_mapping"] - if ok { - b, err := json.Marshal(indexMappingSrc) - if err != nil { - s.logger.Fatal(err.Error()) - return + value, err := protobuf.MarshalAny(resp.Value) + if value != nil { + indexConfigMap := *value.(*map[string]interface{}) + indexMappingSrc, ok := indexConfigMap["index_mapping"].(map[string]interface{}) + if ok { + indexMappingBytes, err := json.Marshal(indexMappingSrc) + if err != nil { + s.logger.Fatal(err.Error()) + return + } + s.indexMapping, err = indexutils.NewIndexMappingFromBytes(indexMappingBytes) + if err != nil { + s.logger.Fatal(err.Error()) + return + } } - s.indexMapping, err = indexutils.NewIndexMappingFromBytes(b) - if err != nil { - s.logger.Fatal(err.Error()) - return + indexTypeSrc, ok := indexConfigMap["index_type"] + if ok { + s.indexType = indexTypeSrc.(string) + } + indexStorageTypeSrc, ok := indexConfigMap["index_storage_type"] + if ok { + s.indexStorageType = indexStorageTypeSrc.(string) } - } - indexTypeSrc, ok := (*value.(*map[string]interface{}))["index_type"] - if ok { - s.indexType = indexTypeSrc.(string) - } - indexStorageTypeSrc, ok := (*value.(*map[string]interface{}))["index_storage_type"] - if ok { - s.indexStorageType = indexStorageTypeSrc.(string) } } else if s.peerGrpcAddress != "" { pc, err := NewGRPCClient(s.peerGrpcAddress) @@ -176,15 +196,17 @@ func (s *Server) Start() { } s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - value, err := pc.GetIndexConfig() + req := &empty.Empty{} + res, err := pc.GetIndexConfig(req) if err != nil { s.logger.Fatal(err.Error()) return } - s.indexMapping = value["index_mapping"].(*mapping.IndexMappingImpl) - s.indexType = value["index_type"].(string) - s.indexStorageType = value["index_storage_type"].(string) + indexMapping, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) + s.indexMapping = indexMapping.(*mapping.IndexMappingImpl) + s.indexType = res.IndexConfig.IndexType + s.indexStorageType = res.IndexConfig.IndexStorageType } // bootstrap node? 
@@ -214,8 +236,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -256,6 +285,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server s.logger.Info("start HTTP server") go func() { @@ -276,7 +311,11 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.node) + req := &index.ClusterJoinRequest{ + Node: s.node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Fatal(err.Error()) return @@ -291,11 +330,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { diff --git a/indexer/server_test.go b/indexer/server_test.go index dd2f7b1..7563ed3 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -26,12 +26,16 @@ import ( "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/errors" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestServer_Start(t *testing.T) { @@ -45,6 +49,7 @@ func TestServer_Start(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -59,8 +64,9 @@ func TestServer_Start(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -97,6 +103,7 @@ func TestServer_LivenessProbe(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -111,8 +118,9 @@ func TestServer_LivenessProbe(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -152,36 +160,39 @@ func TestServer_LivenessProbe(t *testing.T) { } // healthiness - healthiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + reqHealthiness := 
&index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + resHealthiness, err := client.NodeHealthCheck(reqHealthiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness := healthiness - if expHealthiness != actHealthiness { - t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) + expHealthinessState := index.NodeHealthCheckResponse_HEALTHY + actHealthinessState := resHealthiness.State + if expHealthinessState != actHealthinessState { + t.Fatalf("expected content to see %v, saw %v", expHealthinessState, actHealthinessState) } // liveness - liveness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + reqLiveness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + resLiveness, err := client.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) + expLivenessState := index.NodeHealthCheckResponse_ALIVE + actLivenessState := resLiveness.State + if expLivenessState != actLivenessState { + t.Fatalf("expected content to see %v, saw %v", expLivenessState, actLivenessState) } // readiness - readiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + reqReadiness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + resReadiness, err := client.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness := index.NodeHealthCheckResponse_READY.String() - actReadiness := readiness - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) + expReadinessState := index.NodeHealthCheckResponse_READY + actReadinessState := resReadiness.State + if expReadinessState != actReadinessState { + t.Fatalf("expected content to see %v, saw %v", expReadinessState, actReadinessState) } } @@ -196,6 +207,7 @@ func TestServer_GetNode(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -210,8 +222,9 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -251,7 +264,8 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo() + req := &empty.Empty{} + res, err := client.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -260,11 +274,12 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } - actNodeInfo := nodeInfo + actNodeInfo := res.Node if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } @@ -281,6 +296,7 @@ func TestServer_GetCluster(t *testing.T) { shardId := "" 
peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -295,8 +311,9 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -336,7 +353,8 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.ClusterInfo() + req := &empty.Empty{} + res, err := client.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -347,13 +365,14 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, }, }, } - actCluster := cluster + actCluster := res.Cluster if !reflect.DeepEqual(expCluster, actCluster) { t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } @@ -370,6 +389,7 @@ func TestServer_GetIndexMapping(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -384,8 +404,9 @@ func TestServer_GetIndexMapping(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -426,15 +447,17 @@ func TestServer_GetIndexMapping(t *testing.T) { expIndexMapping := indexMapping - actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexMapping := actIndexConfigMap["index_mapping"].(*mapping.IndexMappingImpl) + im, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) if err != nil { t.Fatalf("%v", err) } + actIndexMapping := im.(*mapping.IndexMappingImpl) exp, err := json.Marshal(expIndexMapping) if err != nil { @@ -461,6 +484,7 @@ func TestServer_GetIndexType(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -475,8 +499,9 @@ func TestServer_GetIndexType(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -517,12 +542,13 @@ func TestServer_GetIndexType(t *testing.T) { expIndexType := indexType - actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexType := actIndexConfigMap["index_type"].(string) + 
actIndexType := res.IndexConfig.IndexType if !reflect.DeepEqual(expIndexType, actIndexType) { t.Fatalf("expected content to see %v, saw %v", expIndexType, actIndexType) @@ -540,6 +566,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -554,8 +581,9 @@ func TestServer_GetIndexStorageType(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -596,12 +624,13 @@ func TestServer_GetIndexStorageType(t *testing.T) { expIndexStorageType := indexStorageType - actIndexConfigMap, err := client.GetIndexConfig() + req := &empty.Empty{} + res, err := client.GetIndexConfig(req) if err != nil { t.Fatalf("%v", err) } - actIndexStorageType := actIndexConfigMap["index_storage_type"].(string) + actIndexStorageType := res.IndexConfig.IndexStorageType if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) @@ -619,6 +648,7 @@ func TestServer_GetIndexStats(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -633,8 +663,9 @@ func TestServer_GetIndexStats(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -689,17 +720,24 @@ func TestServer_GetIndexStats(t *testing.T) { "searches": float64(0), } - actIndexStats, err := client.GetIndexStats() + req := &empty.Empty{} + res, err := client.GetIndexStats(req) if err != nil { t.Fatalf("%v", err) } + is, err := protobuf.MarshalAny(res.IndexStats) + if err != nil { + t.Fatalf("%v", err) + } + actIndexStats := *is.(*map[string]interface{}) + if !reflect.DeepEqual(expIndexStats, actIndexStats) { t.Fatalf("expected content to see %v, saw %v", expIndexStats, actIndexStats) } } -func TestServer_PutDocument(t *testing.T) { +func TestServer_Index(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -710,6 +748,7 @@ func TestServer_PutDocument(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -724,8 +763,9 @@ func TestServer_PutDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -764,10 +804,8 @@ func TestServer_PutDocument(t 
*testing.T) { t.Fatalf("%v", err) } - // put document - docs := make([]*index.Document, 0) + // index document docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) @@ -784,21 +822,17 @@ func TestServer_PutDocument(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - docs = append(docs, doc1) - count, err := client.IndexDocument(docs) + req := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(req) if err != nil { t.Fatalf("%v", err) } - - expCount := 1 - actCount := count - - if expCount != actCount { - t.Fatalf("expected content to see %v, saw %v", expCount, actCount) - } } -func TestServer_GetDocument(t *testing.T) { +func TestServer_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -809,6 +843,7 @@ func TestServer_GetDocument(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -823,8 +858,9 @@ func TestServer_GetDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -863,61 +899,69 @@ func TestServer_GetDocument(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - // get document - getDoc1, err := client.GetDocument("enwiki_1") + getReq := &index.GetRequest{Id: "enwiki_1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } - expGetDoc1, _ := index.MarshalDocument(putDoc1) - actGetDoc1, _ := index.MarshalDocument(getDoc1) - if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { - t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", err) + } + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { + t.Fatalf("%v", err) + } + if !cmp.Equal(expFields, actFields) { + t.Fatalf("expected content to see %v, saw %v", 
expFields, actFields) } // get non-existing document - getDocFields2, err := client.GetDocument("doc2") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) + getReq2 := &index.GetRequest{Id: "non-existing"} + getRes2, err := client.Get(getReq2) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } - if getDocFields2 != nil { - t.Fatalf("expected content to see nil, saw %v", getDocFields2) + if getRes2 != nil { + t.Fatalf("expected content to see nil, saw %v", getRes2) } } -func TestServer_DeleteDocument(t *testing.T) { +func TestServer_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -928,6 +972,7 @@ func TestServer_DeleteDocument(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -942,8 +987,9 @@ func TestServer_DeleteDocument(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -982,86 +1028,84 @@ func TestServer_DeleteDocument(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) - } - // get document - getDoc1, err := client.GetDocument("enwiki_1") + getReq := &index.GetRequest{Id: "enwiki_1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } - expGetDoc1, _ := index.MarshalDocument(putDoc1) - actGetDoc1, _ := index.MarshalDocument(getDoc1) - if !reflect.DeepEqual(expGetDoc1, actGetDoc1) { - t.Fatalf("expected content to see %v, saw %v", expGetDoc1, actGetDoc1) + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", err) } - - // get non-existing document - getDoc2, err := client.GetDocument("non-existing") - if err != errors.ErrNotFound { + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { t.Fatalf("%v", err) } - if getDoc2 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc2) + if !cmp.Equal(expFields, actFields) { + 
t.Fatalf("expected content to see %v, saw %v", expFields, actFields) } // delete document - delCount, err := client.DeleteDocument([]string{"enwiki_1"}) + deleteReq := &index.DeleteRequest{Id: "enwiki_1"} + _, err = client.Delete(deleteReq) if err != nil { t.Fatalf("%v", err) } - expDelCount := 1 - actDelCount := delCount - if expDelCount != actDelCount { - t.Fatalf("expected content to see %v, saw %v", expDelCount, actDelCount) - } - // get document - getDoc1, err = client.GetDocument("enwiki_1") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) + // get document again + getRes, err = client.Get(getReq) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } - if getDoc1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc1) + if getRes != nil { + t.Fatalf("expected content to see nil, saw %v", getRes) } // delete non-existing document - getDoc1, err = client.GetDocument("non-existing") - if err != errors.ErrNotFound { - t.Fatalf("%v", err) - } - if getDoc1 != nil { - t.Fatalf("expected content to see nil, saw %v", getDoc1) + deleteReq2 := &index.DeleteRequest{Id: "non-existing"} + _, err = client.Delete(deleteReq2) + if err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + // noop + default: + t.Fatalf("%v", err) + } } } @@ -1076,6 +1120,7 @@ func TestServer_Search(t *testing.T) { shardId := "" peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1090,8 +1135,9 @@ func TestServer_Search(t *testing.T) { BindAddress: bindAddress, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -1130,42 +1176,53 @@ func TestServer_Search(t *testing.T) { t.Fatalf("%v", err) } - // put document - putDocs := make([]*index.Document, 0) - putDocPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - // read index mapping file - putDocFile1, err := os.Open(putDocPath1) + // index document + docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") + docFile1, err := os.Open(docPath1) if err != nil { t.Fatalf("%v", err) } defer func() { - _ = putDocFile1.Close() + _ = docFile1.Close() }() - putDocBytes1, err := ioutil.ReadAll(putDocFile1) + docBytes1, err := ioutil.ReadAll(docFile1) if err != nil { t.Fatalf("%v", err) } - putDoc1 := &index.Document{} - err = index.UnmarshalDocument(putDocBytes1, putDoc1) + doc1 := &index.Document{} + err = index.UnmarshalDocument(docBytes1, doc1) if err != nil { t.Fatalf("%v", err) } - putDocs = append(putDocs, putDoc1) - putCount, err := client.IndexDocument(putDocs) + indexReq := &index.IndexRequest{ + Id: doc1.Id, + Fields: doc1.Fields, + } + _, err = client.Index(indexReq) if err != nil { t.Fatalf("%v", err) } - expPutCount := 1 - actPutCount := putCount - - if expPutCount != actPutCount { - t.Fatalf("expected content to see %v, saw %v", expPutCount, actPutCount) + // get document + getReq := &index.GetRequest{Id: "enwiki_1"} + getRes, err := client.Get(getReq) + if err != nil { + t.Fatalf("%v", err) + } + expFields, err := protobuf.MarshalAny(doc1.Fields) + if err != nil { + t.Fatalf("%v", 
err) + } + actFields, err := protobuf.MarshalAny(getRes.Fields) + if err != nil { + t.Fatalf("%v", err) + } + if !cmp.Equal(expFields, actFields) { + t.Fatalf("expected content to see %v, saw %v", expFields, actFields) } // search searchRequestPath := filepath.Join(curDir, "../example/wiki_search_request.json") - searchRequestFile, err := os.Open(searchRequestPath) if err != nil { t.Fatalf("%v", err) @@ -1173,24 +1230,27 @@ func TestServer_Search(t *testing.T) { defer func() { _ = searchRequestFile.Close() }() - searchRequestByte, err := ioutil.ReadAll(searchRequestFile) if err != nil { t.Fatalf("%v", err) } - searchRequest := bleve.NewSearchRequest(nil) - err = json.Unmarshal(searchRequestByte, searchRequest) + searchReq := &index.SearchRequest{} + marshaler := JsonMarshaler{} + err = marshaler.Unmarshal(searchRequestByte, searchReq) if err != nil { t.Fatalf("%v", err) } - - searchResult1, err := client.Search(searchRequest) + searchRes, err := client.Search(searchReq) + if err != nil { + t.Fatalf("%v", err) + } + searchResult, err := protobuf.MarshalAny(searchRes.SearchResult) if err != nil { t.Fatalf("%v", err) } expTotal := uint64(1) - actTotal := searchResult1.Total + actTotal := searchResult.(*bleve.SearchResult).Total if expTotal != actTotal { t.Fatalf("expected content to see %v, saw %v", expTotal, actTotal) } @@ -1207,6 +1267,7 @@ func TestCluster_Start(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1221,8 +1282,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1248,6 +1310,7 @@ func TestCluster_Start(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1262,8 +1325,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1289,6 +1353,7 @@ func TestCluster_Start(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1303,8 +1368,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1330,7 +1396,7 @@ func TestCluster_Start(t *testing.T) { time.Sleep(5 * time.Second) } -func 
TestCluster_LivenessProbe(t *testing.T) { +func TestCluster_HealthCheck(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1341,6 +1407,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1355,8 +1422,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1382,6 +1450,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1396,8 +1465,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1423,6 +1493,7 @@ func TestCluster_LivenessProbe(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1437,8 +1508,9 @@ func TestCluster_LivenessProbe(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1486,101 +1558,105 @@ func TestCluster_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } + healthinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} + livenessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} + readinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} + // healthiness - healthiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes1, err := client1.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness1 := healthiness1 + expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness1 := healthinessRes1.State if expHealthiness1 != actHealthiness1 { t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } // liveness - liveness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes1, err := client1.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness1 := liveness1 + expLiveness1 
:= index.NodeHealthCheckResponse_ALIVE + actLiveness1 := livenessRes1.State if expLiveness1 != actLiveness1 { t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // readiness - readiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes1, err := client1.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := index.NodeHealthCheckResponse_READY.String() - actReadiness1 := readiness1 + expReadiness1 := index.NodeHealthCheckResponse_READY + actReadiness1 := readinessRes1.State if expReadiness1 != actReadiness1 { t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } // healthiness - healthiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes2, err := client2.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness2 := healthiness2 + expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness2 := healthinessRes2.State if expHealthiness2 != actHealthiness2 { t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } // liveness - liveness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes2, err := client2.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness2 := liveness2 + expLiveness2 := index.NodeHealthCheckResponse_ALIVE + actLiveness2 := livenessRes2.State if expLiveness2 != actLiveness2 { t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // readiness - readiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes2, err := client2.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := index.NodeHealthCheckResponse_READY.String() - actReadiness2 := readiness2 + expReadiness2 := index.NodeHealthCheckResponse_READY + actReadiness2 := readinessRes2.State if expReadiness2 != actReadiness2 { t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // healthiness - healthiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + healthinessRes3, err := client3.NodeHealthCheck(healthinessReq) if err != nil { t.Fatalf("%v", err) } - expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness3 := healthiness3 + expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY + actHealthiness3 := healthinessRes3.State if expHealthiness3 != actHealthiness3 { t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } // liveness - liveness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) + livenessRes3, err := client3.NodeHealthCheck(livenessReq) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := index.NodeHealthCheckResponse_ALIVE.String() - actLiveness3 := liveness3 + expLiveness3 := index.NodeHealthCheckResponse_ALIVE + actLiveness3 := livenessRes3.State if expLiveness3 != actLiveness3 { t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } // readiness - readiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + readinessRes3, err := client3.NodeHealthCheck(readinessReq) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := 
index.NodeHealthCheckResponse_READY.String() - actReadiness3 := readiness3 + expReadiness3 := index.NodeHealthCheckResponse_READY + actReadiness3 := readinessRes3.State if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } @@ -1597,6 +1673,7 @@ func TestCluster_GetNode(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1611,8 +1688,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1638,6 +1716,7 @@ func TestCluster_GetNode(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1652,8 +1731,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1679,6 +1759,7 @@ func TestCluster_GetNode(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1693,8 +1774,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1743,7 +1825,7 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo() + node11, err := client1.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1752,16 +1834,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } - actNode11 := node11 + actNode11 := node11.Node if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node21, err := client2.NodeInfo() + node21, err := client2.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1770,16 +1853,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } - actNode21 := node21 + 
actNode21 := node21.Node if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node31, err := client3.NodeInfo() + node31, err := client3.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1788,11 +1872,12 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } - actNode31 := node31 + actNode31 := node31.Node if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } @@ -1809,6 +1894,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId1 := "" peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1823,8 +1909,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1850,6 +1937,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId2 := "" peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1864,8 +1952,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1891,6 +1980,7 @@ func TestCluster_GetCluster(t *testing.T) { shardId3 := "" peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1905,8 +1995,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_UNKNOWN, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1955,7 +2046,7 @@ func TestCluster_GetCluster(t *testing.T) { } // get cluster info from manager1 - cluster1, err := client1.ClusterInfo() + cluster1, err := client1.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -1966,8 +2057,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1975,8 +2067,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, 
Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1984,18 +2077,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster1 := cluster1 + actCluster1 := cluster1.Cluster if !reflect.DeepEqual(expCluster1, actCluster1) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.ClusterInfo() + cluster2, err := client2.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -2006,8 +2100,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -2015,8 +2110,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -2024,18 +2120,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster2 := cluster2 + actCluster2 := cluster2.Cluster if !reflect.DeepEqual(expCluster2, actCluster2) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.ClusterInfo() + cluster3, err := client3.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -2046,8 +2143,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: index.Node_LEADER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -2055,8 +2153,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -2064,13 +2163,14 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: index.Node_FOLLOWER, Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster3 := cluster3 + actCluster3 := cluster3.Cluster if !reflect.DeepEqual(expCluster3, actCluster3) { t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } diff --git a/manager/grpc_client.go b/manager/grpc_client.go index 6935724..4d732a4 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -16,13 +16,9 @@ package manager import ( "context" - "errors" "math" - "github.com/golang/protobuf/ptypes/any" 
"github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -95,180 +91,66 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { - req := &management.NodeHealthCheckRequest{} - - switch probe { - case management.NodeHealthCheckRequest_HEALTHINESS.String(): - req.Probe = management.NodeHealthCheckRequest_HEALTHINESS - case management.NodeHealthCheckRequest_LIVENESS.String(): - req.Probe = management.NodeHealthCheckRequest_LIVENESS - case management.NodeHealthCheckRequest_READINESS.String(): - req.Probe = management.NodeHealthCheckRequest_READINESS - default: - req.Probe = management.NodeHealthCheckRequest_HEALTHINESS - } - - resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - return management.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) - } - - return resp.State.String(), nil +func (c *GRPCClient) NodeHealthCheck(req *management.NodeHealthCheckRequest, opts ...grpc.CallOption) (*management.NodeHealthCheckResponse, error) { + return c.client.NodeHealthCheck(c.ctx, req, opts...) } -func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*management.Node, error) { - resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Node, nil +func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.NodeInfoResponse, error) { + return c.client.NodeInfo(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterJoin(node *management.Node, opts ...grpc.CallOption) error { - req := &management.ClusterJoinRequest{ - Node: node, - } - - _, err := c.client.ClusterJoin(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterJoin(req *management.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterJoin(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { - req := &management.ClusterLeaveRequest{ - Id: id, - } - - _, err := c.client.ClusterLeave(c.ctx, req, opts...) - if err != nil { - return err - } - - return nil +func (c *GRPCClient) ClusterLeave(req *management.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.ClusterLeave(c.ctx, req, opts...) } -func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*management.Cluster, error) { - resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) - if err != nil { - st, _ := status.FromError(err) - - return nil, errors.New(st.Message()) - } - - return resp.Cluster, nil +func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.ClusterInfoResponse, error) { + return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) } -func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { - req := &empty.Empty{} - - watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { + return c.client.ClusterWatch(c.ctx, req, opts...) } -func (c *GRPCClient) Get(key string, opts ...grpc.CallOption) (interface{}, error) { - req := &management.GetRequest{ - Key: key, - } - - resp, err := c.client.Get(c.ctx, req, opts...) +func (c *GRPCClient) Get(req *management.GetRequest, opts ...grpc.CallOption) (*management.GetResponse, error) { + res, err := c.client.Get(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - switch st.Code() { case codes.NotFound: - return nil, blasterrors.ErrNotFound + return &management.GetResponse{}, nil default: - return nil, errors.New(st.Message()) + return nil, err } } - - value, err := protobuf.MarshalAny(resp.Value) - - return value, nil + return res, nil } -func (c *GRPCClient) Set(key string, value interface{}, opts ...grpc.CallOption) error { - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &management.SetRequest{ - Key: key, - Value: valueAny, - } - - _, err = c.client.Set(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - - switch st.Code() { - case codes.NotFound: - return blasterrors.ErrNotFound - default: - return errors.New(st.Message()) - } - } - - return nil +func (c *GRPCClient) Set(req *management.SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Set(c.ctx, req, opts...) } -func (c *GRPCClient) Delete(key string, opts ...grpc.CallOption) error { - req := &management.DeleteRequest{ - Key: key, - } - - _, err := c.client.Delete(c.ctx, req, opts...) +func (c *GRPCClient) Delete(req *management.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + res, err := c.client.Delete(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - switch st.Code() { case codes.NotFound: - return blasterrors.ErrNotFound + return &empty.Empty{}, nil default: - return errors.New(st.Message()) + return nil, err } } - - return nil + return res, nil } -func (c *GRPCClient) Watch(key string, opts ...grpc.CallOption) (management.Management_WatchClient, error) { - req := &management.WatchRequest{ - Key: key, - } - - watchClient, err := c.client.Watch(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - return nil, errors.New(st.Message()) - } - - return watchClient, nil +func (c *GRPCClient) Watch(req *management.WatchRequest, opts ...grpc.CallOption) (management.Management_WatchClient, error) { + return c.client.Watch(c.ctx, req, opts...) } -func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { - _, err := c.client.Snapshot(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) - - return errors.New(st.Message()) - } - - return nil +func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + return c.client.Snapshot(c.ctx, &empty.Empty{}) } diff --git a/manager/grpc_gateway.go b/manager/grpc_gateway.go new file mode 100644 index 0000000..3f505d4 --- /dev/null +++ b/manager/grpc_gateway.go @@ -0,0 +1,172 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/management" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type JsonMarshaler struct{} + +// ContentType always Returns "application/json". +func (*JsonMarshaler) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *management.GetResponse: + value, err := protobuf.MarshalAny(v.(*management.GetResponse).Value) + if err != nil { + return nil, err + } + return json.Marshal( + map[string]interface{}{ + "value": value, + }, + ) + default: + return json.Marshal(v) + } +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + switch v.(type) { + case *management.SetRequest: + var tmpValue map[string]interface{} + err = json.Unmarshal(buffer, &tmpValue) + if err != nil { + return err + } + value, ok := tmpValue["value"] + if !ok { + return errors.New("value does not exist") + } + v.(*management.SetRequest).Value = &any.Any{} + return protobuf.UnmarshalAny(value, v.(*management.SetRequest).Value) + default: + return json.Unmarshal(buffer, v) + } + }, + ) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JsonMarshaler) Delimiter() []byte { + return []byte("\n") +} + +type GRPCGateway struct { + grpcGatewayAddr string + grpcAddr string + logger *zap.Logger + + ctx context.Context + cancel context.CancelFunc + listener net.Listener +} + +func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { + return &GRPCGateway{ + grpcGatewayAddr: grpcGatewayAddr, + grpcAddr: grpcAddr, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + s.ctx, s.cancel = NewGRPCContext() + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), + ) + opts := []grpc.DialOption{grpc.WithInsecure()} + + err := management.RegisterManagementHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) + if err != nil { + return err + } + + s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) + if err != nil { + return err + } + + err = http.Serve(s.listener, mux) + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + return err + } + + return nil +} + +func (s *GRPCGateway) GetAddress() (string, error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) + if err != nil { + return "", err + } + + v4Addr := "" + if tcpAddr.IP.To4() != nil { + v4Addr = tcpAddr.IP.To4().String() + } + port := tcpAddr.Port + + return fmt.Sprintf("%s:%d", v4Addr, port), nil +} diff --git a/manager/grpc_server.go b/manager/grpc_server.go index 453e240..8d17486 100644 --- a/manager/grpc_server.go +++ b/manager/grpc_server.go @@ -40,22 +40,26 @@ type GRPCServer struct { func NewGRPCServer(grpcAddr string, service management.ManagementServer, logger *zap.Logger) (*GRPCServer, error) { server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - //grpc_recovery.UnaryServerInterceptor(), - )), + grpc.StreamInterceptor( + grpc_middleware.ChainStreamServer( + //grpc_ctxtags.StreamServerInterceptor(), + //grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_zap.StreamServerInterceptor(logger), + //grpc_auth.StreamServerInterceptor(myAuthFunction), + //grpc_recovery.StreamServerInterceptor(), + ), + ), + grpc.UnaryInterceptor( + grpc_middleware.ChainUnaryServer( + //grpc_ctxtags.UnaryServerInterceptor(), + //grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_zap.UnaryServerInterceptor(logger), + //grpc_auth.UnaryServerInterceptor(myAuthFunction), + //grpc_recovery.UnaryServerInterceptor(), + ), + ), ) management.RegisterManagementServer(server, service) diff --git a/manager/grpc_service.go b/manager/grpc_service.go index c0745ae..c79f7ad 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -79,16 +79,28 @@ func (s *GRPCService) Stop() error { } func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { + //leaderId, err := 
s.raftServer.LeaderID(10 * time.Second) + //if err != nil { + // return nil, err + //} + //client, exist := s.peerClients[string(leaderId)] + //if !exist { + // err := errors.New("there is no client for leader") + // s.logger.Error(err.Error()) + // return nil, err + //} + //return client, nil + for id, node := range s.cluster.Nodes { switch node.State { case management.Node_LEADER: - if client, exist := s.peerClients[id]; exist { - return client, nil - } + } + if client, exist := s.peerClients[id]; exist { + return client, nil } } - err := errors.New("there is no leader") + err := errors.New("there is no client for leader") s.logger.Error(err.Error()) return nil, err } @@ -284,12 +296,18 @@ func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *management.NodeH resp := &management.NodeHealthCheckResponse{} switch req.Probe { + case management.NodeHealthCheckRequest_UNKNOWN: + fallthrough case management.NodeHealthCheckRequest_HEALTHINESS: resp.State = management.NodeHealthCheckResponse_HEALTHY case management.NodeHealthCheckRequest_LIVENESS: resp.State = management.NodeHealthCheckResponse_ALIVE case management.NodeHealthCheckRequest_READINESS: resp.State = management.NodeHealthCheckResponse_READY + default: + err := errors.New("unknown probe") + s.logger.Error(err.Error()) + return resp, status.Error(codes.InvalidArgument, err.Error()) } return resp, nil @@ -325,7 +343,8 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { return nil, err } - node, err := s.peerClients[id].NodeInfo() + req := &empty.Empty{} + resp, err := s.peerClients[id].NodeInfo(req) if err != nil { s.logger.Debug(err.Error(), zap.String("id", id)) return &management.Node{ @@ -338,7 +357,7 @@ func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { }, nil } - return node, nil + return resp.Node, nil } func (s *GRPCService) getNode(id string) (*management.Node, error) { @@ -377,7 +396,12 @@ func (s *GRPCService) setNode(node *management.Node) error { s.logger.Error(err.Error()) return err } - err = client.ClusterJoin(node) + + req := &management.ClusterJoinRequest{ + Node: node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Error(err.Error()) return err @@ -413,7 +437,12 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.ClusterLeave(id) + + req := &management.ClusterLeaveRequest{ + Id: id, + } + + _, err = client.ClusterLeave(req) if err != nil { s.logger.Error(err.Error()) return err @@ -534,13 +563,13 @@ func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*emp resp := &empty.Empty{} - value, err := protobuf.MarshalAny(req.Value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - if s.raftServer.IsLeader() { + value, err := protobuf.MarshalAny(req.Value) + if err != nil { + s.logger.Error(err.Error()) + return resp, status.Error(codes.Internal, err.Error()) + } + err = s.raftServer.SetValue(req.Key, value) if err != nil { s.logger.Error(err.Error()) @@ -558,7 +587,7 @@ func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*emp s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - err = client.Set(req.Key, value) + resp, err = client.Set(req) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -604,7 +633,7 @@ func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) 
s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - err = client.Delete(req.Key) + resp, err = client.Delete(req) if err != nil { switch err { case blasterrors.ErrNotFound: diff --git a/manager/http_handler.go b/manager/http_handler.go new file mode 100644 index 0000000..0ceb447 --- /dev/null +++ b/manager/http_handler.go @@ -0,0 +1,79 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manager + +import ( + "net/http" + "time" + + "github.com/gorilla/mux" + blasthttp "github.com/mosuka/blast/http" + "github.com/mosuka/blast/version" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" +) + +type Router struct { + mux.Router + + logger *zap.Logger +} + +func NewRouter(logger *zap.Logger) (*Router, error) { + router := &Router{ + logger: logger, + } + + router.StrictSlash(true) + + router.Handle("/", NewRootHandler(logger)).Methods("GET") + router.Handle("/metrics", promhttp.Handler()).Methods("GET") + + return router, nil +} + +func (r *Router) Close() error { + return nil +} + +type RootHandler struct { + logger *zap.Logger +} + +func NewRootHandler(logger *zap.Logger) *RootHandler { + return &RootHandler{ + logger: logger, + } +} + +func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + start := time.Now() + status := http.StatusOK + content := make([]byte, 0) + + defer blasthttp.RecordMetrics(start, status, w, r) + + msgMap := map[string]interface{}{ + "version": version.Version, + "status": status, + } + + content, err := blasthttp.NewJSONMessage(msgMap) + if err != nil { + h.logger.Error(err.Error()) + } + + blasthttp.WriteResponse(w, content, status, h.logger) +} diff --git a/manager/http_router.go b/manager/http_router.go deleted file mode 100644 index be7ca13..0000000 --- a/manager/http_router.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/gorilla/mux" - blasterrors "github.com/mosuka/blast/errors" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - GRPCClient *GRPCClient - logger *zap.Logger -} - -func NewRouter(grpcAddr string, logger *zap.Logger) (*Router, error) { - grpcClient, err := NewGRPCClient(grpcAddr) - if err != nil { - return nil, err - } - - router := &Router{ - GRPCClient: grpcClient, - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/configs", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/configs/{path:.*}", NewPutHandler(router.GRPCClient, logger)).Methods("PUT") - router.Handle("/configs/{path:.*}", NewGetHandler(router.GRPCClient, logger)).Methods("GET") - router.Handle("/configs/{path:.*}", NewDeleteHandler(router.GRPCClient, logger)).Methods("DELETE") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - r.GRPCClient.Cancel() - - err := r.GRPCClient.Close() - if err != nil { - return err - } - - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type GetHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewGetHandler(client *GRPCClient, logger *zap.Logger) *GetHandler { - return &GetHandler{ - client: client, - logger: logger, - } -} - -func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - value, err := h.client.Get(key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - status = http.StatusNotFound - default: - status = http.StatusInternalServerError - } - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // interface{} -> []byte - content, err = json.MarshalIndent(value, "", " ") - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type PutHandler 
struct { - client *GRPCClient - logger *zap.Logger -} - -func NewPutHandler(client *GRPCClient, logger *zap.Logger) *PutHandler { - return &PutHandler{ - client: client, - logger: logger, - } -} - -func (h *PutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - // string -> map[string]interface{} - var value interface{} - err = json.Unmarshal(bodyBytes, &value) - if err != nil { - status = http.StatusBadRequest - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - err = h.client.Set(key, value) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} - -type DeleteHandler struct { - client *GRPCClient - logger *zap.Logger -} - -func NewDeleteHandler(client *GRPCClient, logger *zap.Logger) *DeleteHandler { - return &DeleteHandler{ - client: client, - logger: logger, - } -} - -func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - vars := mux.Vars(r) - - key := vars["path"] - - err := h.client.Delete(key) - if err != nil { - status = http.StatusInternalServerError - - msgMap := map[string]interface{}{ - "message": err.Error(), - "status": status, - } - - content, err = blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) - return - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/manager/http_server.go b/manager/http_server.go index f64b7c6..33bd0fc 100644 --- a/manager/http_server.go +++ b/manager/http_server.go @@ -15,7 +15,6 @@ package manager import ( - "fmt" "net" "net/http" @@ -68,18 +67,3 @@ func (s *HTTPServer) Stop() error { return nil } - -func (s *HTTPServer) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/raft_command.go b/manager/raft_command.go deleted file mode 100644 index 97fa3df..0000000 --- a/manager/raft_command.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import "encoding/json" - -type command int - -const ( - unknown command = iota - setNode - deleteNode - setKeyValue - deleteKeyValue -) - -type message struct { - Command command `json:"command,omitempty"` - Data json.RawMessage `json:"data,omitempty"` -} - -func newMessage(cmd command, data interface{}) (*message, error) { - b, err := json.Marshal(data) - if err != nil { - return nil, err - } - return &message{ - Command: cmd, - Data: b, - }, nil -} diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 325042d..bfd859f 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -21,9 +21,11 @@ import ( "io/ioutil" "sync" + "github.com/gogo/protobuf/proto" "github.com/hashicorp/raft" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/maputils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" ) @@ -95,6 +97,7 @@ func (f *RaftFSM) DeleteNode(nodeId string) error { } func (f *RaftFSM) GetValue(key string) (interface{}, error) { + // get raw data value, err := f.data.Get(key) if err != nil { switch err { @@ -107,15 +110,7 @@ func (f *RaftFSM) GetValue(key string) (interface{}, error) { } } - var ret interface{} - switch value.(type) { - case maputils.Map: - ret = value.(maputils.Map).ToMap() - default: - ret = value - } - - return ret, nil + return value, nil } func (f *RaftFSM) SetValue(key string, value interface{}, merge bool) error { @@ -157,65 +152,47 @@ type fsmResponse struct { } func (f *RaftFSM) Apply(l *raft.Log) interface{} { - var msg message - err := json.Unmarshal(l.Data, &msg) + proposal := &management.Proposal{} + err := proto.Unmarshal(l.Data, proposal) if err != nil { f.logger.Error(err.Error()) return err } - switch msg.Command { - case setNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + switch proposal.Event { + case management.Proposal_SET_NODE: + err = f.SetNode(proposal.Node) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - b, err := json.Marshal(data["node"]) + return &fsmResponse{error: nil} + case management.Proposal_DELETE_NODE: + err = f.DeleteNode(proposal.Node.Id) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - var node *management.Node - err = json.Unmarshal(b, &node) + return &fsmResponse{error: nil} + case management.Proposal_SET_VALUE: + value, err := protobuf.MarshalAny(proposal.KeyValue.Value) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNode(node) + err = f.SetValue(proposal.KeyValue.Key, value, false) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - return &fsmResponse{error: err} - case deleteNode: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) + return &fsmResponse{error: nil} + case management.Proposal_DELETE_VALUE: + err = f.DeleteValue(proposal.KeyValue.Key) if err != nil { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.DeleteNode(data["id"].(string)) - return &fsmResponse{error: err} - case setKeyValue: - var 
data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetValue(data["key"].(string), data["value"], false) - return &fsmResponse{error: err} - case deleteKeyValue: - var data map[string]interface{} - err := json.Unmarshal(msg.Data, &data) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.DeleteValue(data["key"].(string)) - return &fsmResponse{error: err} + return &fsmResponse{error: nil} default: err = errors.New("unsupported command") f.logger.Error(err.Error()) diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go index ca0ad48..86f70ba 100644 --- a/manager/raft_fsm_test.go +++ b/manager/raft_fsm_test.go @@ -20,6 +20,7 @@ import ( "reflect" "testing" + "github.com/google/go-cmp/cmp" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/protobuf/management" ) @@ -360,7 +361,7 @@ func TestRaftFSM_Get(t *testing.T) { expectedValue := 1 actualValue := value - if expectedValue != actualValue { + if !cmp.Equal(expectedValue, actualValue) { t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } } @@ -395,9 +396,7 @@ func TestRaftFSM_Set(t *testing.T) { } // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, - }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) } @@ -405,36 +404,26 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp1 := map[string]interface{}{ - "a": 1, - } + exp1 := map[string]interface{}{"a": 1} act1 := val1 if !reflect.DeepEqual(exp1, act1) { t.Fatalf("expected content to see %v, saw %v", exp1, act1) } // merge {"a": "A"} - _ = fsm.SetValue("/", map[string]interface{}{ - "a": "A", - }, true) + _ = fsm.SetValue("/", map[string]interface{}{"a": "A"}, true) val2, err := fsm.GetValue("/") if err != nil { t.Fatalf("%v", err) } - exp2 := map[string]interface{}{ - "a": "A", - } + exp2 := map[string]interface{}{"a": "A"} act2 := val2 if !reflect.DeepEqual(exp2, act2) { t.Fatalf("expected content to see %v, saw %v", exp2, act2) } // set {"a": {"b": "AB"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"b": "AB"}}, false) if err != nil { t.Fatalf("%v", err) } @@ -443,22 +432,14 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp3 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - }, - } + exp3 := map[string]interface{}{"a": map[string]interface{}{"b": "AB"}} act3 := val3 if !reflect.DeepEqual(exp3, act3) { t.Fatalf("expected content to see %v, saw %v", exp3, act3) } // merge {"a": {"c": "AC"}} - err = fsm.SetValue("/", map[string]interface{}{ - "a": map[string]interface{}{ - "c": "AC", - }, - }, true) + err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"c": "AC"}}, true) if err != nil { t.Fatalf("%v", err) } @@ -466,21 +447,14 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp4 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": "AB", - "c": "AC", - }, - } + exp4 := map[string]interface{}{"a": map[string]interface{}{"b": "AB", "c": "AC"}} act4 := val4 if !reflect.DeepEqual(exp4, act4) { t.Fatalf("expected content to see %v, saw %v", exp4, act4) } // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{ - "a": 1, 
- }, false) + err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) } @@ -488,9 +462,7 @@ func TestRaftFSM_Set(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp5 := map[string]interface{}{ - "a": 1, - } + exp5 := map[string]interface{}{"a": 1} act5 := val5 if !reflect.DeepEqual(exp5, act5) { t.Fatalf("expected content to see %v, saw %v", exp5, act5) @@ -546,6 +518,7 @@ func TestRaftFSM_Delete(t *testing.T) { t.Fatalf("%v", err) } + // set {"a": 1} err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) if err != nil { t.Fatalf("%v", err) @@ -558,7 +531,7 @@ func TestRaftFSM_Delete(t *testing.T) { expectedValue := 1 actualValue := value - if expectedValue != actualValue { + if !cmp.Equal(expectedValue, actualValue) { t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) } diff --git a/manager/raft_server.go b/manager/raft_server.go index 46bc079..c169e31 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -25,11 +25,14 @@ import ( "time" "github.com/blevesearch/bleve/mapping" + "github.com/gogo/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" raftbadgerdb "github.com/markthethomas/raft-badger" _ "github.com/mosuka/blast/builtins" blasterrors "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" @@ -354,24 +357,17 @@ func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { } func (s *RaftServer) setNode(node *management.Node) error { - msg, err := newMessage( - setNode, - map[string]interface{}{ - "node": node, - }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err + proposal := &management.Proposal{ + Event: management.Proposal_SET_NODE, + Node: node, } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.Any("node", node)) @@ -387,24 +383,19 @@ func (s *RaftServer) setNode(node *management.Node) error { } func (s *RaftServer) deleteNode(nodeId string) error { - msg, err := newMessage( - deleteNode, - map[string]interface{}{ - "id": nodeId, + proposal := &management.Proposal{ + Event: management.Proposal_DELETE_NODE, + Node: &management.Node{ + Id: nodeId, }, - ) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) + s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error(), zap.String("id", nodeId)) @@ -577,25 +568,27 @@ func (s *RaftServer) SetValue(key string, value interface{}) error { return raft.ErrNotLeader } - msg, err := newMessage( - setKeyValue, - map[string]interface{}{ - "key": key, - "value": value, - }, - ) + valueAny := &any.Any{} + err := protobuf.UnmarshalAny(value, valueAny) if err != nil { s.logger.Error(err.Error()) return err } - msgBytes, err := json.Marshal(msg) + proposal := 
&management.Proposal{ + Event: management.Proposal_SET_VALUE, + KeyValue: &management.KeyValue{ + Key: key, + Value: valueAny, + }, + } + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) @@ -616,24 +609,19 @@ func (s *RaftServer) DeleteValue(key string) error { return raft.ErrNotLeader } - msg, err := newMessage( - deleteKeyValue, - map[string]interface{}{ - "key": key, + proposal := &management.Proposal{ + Event: management.Proposal_DELETE_VALUE, + KeyValue: &management.KeyValue{ + Key: key, }, - ) - if err != nil { - s.logger.Error(err.Error()) - return err } - - msgBytes, err := json.Marshal(msg) + proposalByte, err := proto.Marshal(proposal) if err != nil { s.logger.Error(err.Error()) return err } - f := s.raft.Apply(msgBytes, 10*time.Second) + f := s.raft.Apply(proposalByte, 10*time.Second) err = f.Error() if err != nil { s.logger.Error(err.Error()) diff --git a/manager/server.go b/manager/server.go index 809d3c4..909b4fc 100644 --- a/manager/server.go +++ b/manager/server.go @@ -36,6 +36,7 @@ type Server struct { raftServer *RaftServer grpcService *GRPCService grpcServer *GRPCServer + grpcGateway *GRPCGateway httpRouter *Router httpServer *HTTPServer } @@ -83,8 +84,15 @@ func (s *Server) Start() { return } + // create gRPC gateway + s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) + if err != nil { + s.logger.Error(err.Error()) + return + } + // create HTTP router - s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) + s.httpRouter, err = NewRouter(s.logger) if err != nil { s.logger.Fatal(err.Error()) return @@ -93,7 +101,7 @@ func (s *Server) Start() { // create HTTP server s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { - s.logger.Error(err.Error()) + s.logger.Fatal(err.Error()) return } @@ -125,6 +133,12 @@ func (s *Server) Start() { } }() + // start gRPC gateway + s.logger.Info("start gRPC gateway") + go func() { + _ = s.grpcGateway.Start() + }() + // start HTTP server s.logger.Info("start HTTP server") go func() { @@ -145,7 +159,11 @@ func (s *Server) Start() { return } - err = client.ClusterJoin(s.node) + req := &management.ClusterJoinRequest{ + Node: s.node, + } + + _, err = client.ClusterJoin(req) if err != nil { s.logger.Fatal(err.Error()) return @@ -160,11 +178,18 @@ func (s *Server) Stop() { s.logger.Error(err.Error()) } + s.logger.Info("stop HTTP router") err = s.httpRouter.Close() if err != nil { s.logger.Error(err.Error()) } + s.logger.Info("stop gRPC gateway") + err = s.grpcGateway.Stop() + if err != nil { + s.logger.Error(err.Error()) + } + s.logger.Info("stop gRPC server") err = s.grpcServer.Stop() if err != nil { @@ -198,7 +223,7 @@ func (s *Server) GrpcAddress() string { } func (s *Server) HttpAddress() string { - address, err := s.httpServer.GetAddress() + address, err := s.grpcGateway.GetAddress() if err != nil { return "" } diff --git a/manager/server_test.go b/manager/server_test.go index 855a0f1..0b863c3 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -22,9 +22,12 @@ import ( "testing" "time" - blasterrors "github.com/mosuka/blast/errors" + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" 
"github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" @@ -39,6 +42,7 @@ func TestServer_Start(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -53,8 +57,9 @@ func TestServer_Start(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -92,6 +97,7 @@ func TestServer_HealthCheck(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -106,8 +112,9 @@ func TestServer_HealthCheck(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -150,34 +157,37 @@ func TestServer_HealthCheck(t *testing.T) { } // healthiness - healthiness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + reqHealthiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + resHealthiness, err := client.NodeHealthCheck(reqHealthiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness := healthiness + expHealthiness := management.NodeHealthCheckResponse_HEALTHY + actHealthiness := resHealthiness.State if expHealthiness != actHealthiness { t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) } // liveness - liveness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + resLiveness, err := client.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness := liveness + expLiveness := management.NodeHealthCheckResponse_ALIVE + actLiveness := resLiveness.State if expLiveness != actLiveness { t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness - readiness, err := client.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + resReadiness, err := client.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness := management.NodeHealthCheckResponse_READY.String() - actReadiness := readiness + expReadiness := management.NodeHealthCheckResponse_READY + actReadiness := resReadiness.State if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) } @@ -192,6 +202,7 @@ func TestServer_GetNode(t 
*testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewawyAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -206,8 +217,9 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewawyAddress, + HttpAddress: httpAddress, }, } @@ -250,7 +262,7 @@ func TestServer_GetNode(t *testing.T) { } // get node - nodeInfo, err := client.NodeInfo() + res, err := client.NodeInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -259,11 +271,12 @@ func TestServer_GetNode(t *testing.T) { BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewawyAddress, + HttpAddress: httpAddress, }, } - actNodeInfo := nodeInfo + actNodeInfo := res.Node if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } @@ -278,6 +291,7 @@ func TestServer_GetCluster(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -292,8 +306,9 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -336,7 +351,7 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.ClusterInfo() + res, err := client.ClusterInfo(&empty.Empty{}) if err != nil { t.Fatalf("%v", err) } @@ -347,19 +362,20 @@ func TestServer_GetCluster(t *testing.T) { BindAddress: bindAddress, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, }, }, } - actCluster := cluster + actCluster := res.Cluster if !reflect.DeepEqual(expCluster, actCluster) { t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) } } -func TestServer_SetState(t *testing.T) { +func TestServer_Set(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -368,6 +384,7 @@ func TestServer_SetState(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -382,8 +399,9 @@ func TestServer_SetState(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -426,27 +444,40 @@ 
func TestServer_SetState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{ + Key: "test/key1", + } + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(getRes.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } -func TestServer_GetState(t *testing.T) { +func TestServer_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -455,6 +486,7 @@ func TestServer_GetState(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -469,8 +501,9 @@ func TestServer_GetState(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -513,27 +546,38 @@ func TestServer_GetState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{Key: "test/key1"} + getRes, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(getRes.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } } -func TestServer_DeleteState(t *testing.T) { +func TestServer_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -542,6 +586,7 @@ func TestServer_DeleteState(t *testing.T) { peerGrpcAddress := "" grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -556,8 +601,9 @@ func TestServer_DeleteState(t *testing.T) { BindAddress: bindAddress, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - HttpAddress: httpAddress, + GrpcAddress: grpcAddress, + GrpcGatewayAddress: grpcGatewayAddress, + HttpAddress: httpAddress, }, } @@ -600,43 +646,53 @@ func TestServer_DeleteState(t *testing.T) { } // set value - err = client.Set("test/key1", "val1") + valueAny := &any.Any{} + if err != nil { + t.Fatalf("%v", err) + } + err = protobuf.UnmarshalAny("val1", valueAny) + setReq := &management.SetRequest{ + Key: "test/key1", + Value: 
valueAny, + } + _, err = client.Set(setReq) if err != nil { t.Fatalf("%v", err) } // get value - val1, err := client.Get("test/key1") + getReq := &management.GetRequest{ + Key: "test/key1", + } + res, err := client.Get(getReq) if err != nil { t.Fatalf("%v", err) } expVal1 := "val1" + val1, err := protobuf.MarshalAny(res.Value) actVal1 := *val1.(*string) - if expVal1 != actVal1 { + if !cmp.Equal(expVal1, actVal1) { t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) } // delete value - err = client.Delete("test/key1") - if err != nil { - t.Fatalf("%v", err) + deleteReq := &management.DeleteRequest{ + Key: "test/key1", } - - val1, err = client.Get("test/key1") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - - if val1 != nil { + _, err = client.Delete(deleteReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data - err = client.Delete("test/non-existing") - if err != blasterrors.ErrNotFound { + deleteNonExistingReq := &management.DeleteRequest{ + Key: "test/non-existing", + } + _, err = client.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } } @@ -650,6 +706,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -664,8 +721,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -692,6 +750,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -706,8 +765,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -734,6 +794,7 @@ func TestCluster_Start(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -748,8 +809,9 @@ func TestCluster_Start(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -787,6 +849,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", 
testutils.TmpPort()) @@ -801,8 +864,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -829,6 +893,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -843,8 +908,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -871,6 +937,7 @@ func TestCluster_HealthCheck(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -885,8 +952,9 @@ func TestCluster_HealthCheck(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -937,101 +1005,105 @@ func TestCluster_HealthCheck(t *testing.T) { t.Fatalf("%v", err) } + reqHealtiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} + reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} + reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} + // healthiness - healthiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness1, err := client1.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness1 := healthiness1 + expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness1 := resHealthiness1.State if expHealthiness1 != actHealthiness1 { t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } // liveness - liveness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness1, err := client1.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness1 := liveness1 + expLiveness1 := management.NodeHealthCheckResponse_ALIVE + actLiveness1 := resLiveness1.State if expLiveness1 != actLiveness1 { t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } // readiness - readiness1, err := client1.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness1, err := client1.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := management.NodeHealthCheckResponse_READY.String() - actReadiness1 := readiness1 + 
expReadiness1 := management.NodeHealthCheckResponse_READY + actReadiness1 := resReadiness1.State if expReadiness1 != actReadiness1 { t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } // healthiness - healthiness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness2, err := client2.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness2 := healthiness2 + expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness2 := resHealthiness2.State if expHealthiness2 != actHealthiness2 { t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } // liveness - liveness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness2, err := client2.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness2 := liveness2 + expLiveness2 := management.NodeHealthCheckResponse_ALIVE + actLiveness2 := resLiveness2.State if expLiveness2 != actLiveness2 { t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } // readiness - readiness2, err := client2.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness2, err := client2.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := management.NodeHealthCheckResponse_READY.String() - actReadiness2 := readiness2 + expReadiness2 := management.NodeHealthCheckResponse_READY + actReadiness2 := resReadiness2.State if expReadiness2 != actReadiness2 { t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } // healthiness - healthiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_HEALTHINESS.String()) + resHealthiness3, err := client3.NodeHealthCheck(reqHealtiness) if err != nil { t.Fatalf("%v", err) } - expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY.String() - actHealthiness3 := healthiness3 + expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY + actHealthiness3 := resHealthiness3.State if expHealthiness3 != actHealthiness3 { t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } // liveness - liveness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_LIVENESS.String()) + resLiveness3, err := client3.NodeHealthCheck(reqLiveness) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := management.NodeHealthCheckResponse_ALIVE.String() - actLiveness3 := liveness3 + expLiveness3 := management.NodeHealthCheckResponse_ALIVE + actLiveness3 := resLiveness3.State if expLiveness3 != actLiveness3 { t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } // readiness - readiness3, err := client3.NodeHealthCheck(management.NodeHealthCheckRequest_READINESS.String()) + resReadiness3, err := client3.NodeHealthCheck(reqReadiness) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := management.NodeHealthCheckResponse_READY.String() - actReadiness3 := readiness3 + expReadiness3 := management.NodeHealthCheckResponse_READY + actReadiness3 := resReadiness3.State if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } @@ -1046,6 +1118,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + 
grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1060,8 +1133,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1088,6 +1162,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1102,8 +1177,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1130,6 +1206,7 @@ func TestCluster_GetNode(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1144,8 +1221,9 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1197,7 +1275,8 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.NodeInfo() + req := &empty.Empty{} + resNodeInfo11, err := client1.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1206,16 +1285,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } - actNode11 := node11 + actNode11 := resNodeInfo11.Node if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node21, err := client2.NodeInfo() + resNodeInfo21, err := client2.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1224,16 +1304,17 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } - actNode21 := node21 + actNode21 := resNodeInfo21.Node if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node31, err := client3.NodeInfo() + resNodeInfo31, err := client3.NodeInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1242,11 +1323,12 @@ func TestCluster_GetNode(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, 
Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } - actNode31 := node31 + actNode31 := resNodeInfo31.Node if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } @@ -1261,6 +1343,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1275,8 +1358,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1303,6 +1387,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1317,8 +1402,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1345,6 +1431,7 @@ func TestCluster_GetCluster(t *testing.T) { peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) @@ -1359,8 +1446,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1412,7 +1500,8 @@ func TestCluster_GetCluster(t *testing.T) { } // get cluster info from manager1 - cluster1, err := client1.ClusterInfo() + req := &empty.Empty{} + resClusterInfo1, err := client1.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1423,8 +1512,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1432,8 +1522,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1441,18 +1532,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: 
&management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster1 := cluster1 + actCluster1 := resClusterInfo1.Cluster if !reflect.DeepEqual(expCluster1, actCluster1) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.ClusterInfo() + resClusterInfo2, err := client2.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1463,8 +1555,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1472,8 +1565,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1481,18 +1575,19 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster2 := cluster2 + actCluster2 := resClusterInfo2.Cluster if !reflect.DeepEqual(expCluster2, actCluster2) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.ClusterInfo() + resClusterInfo3, err := client3.ClusterInfo(req) if err != nil { t.Fatalf("%v", err) } @@ -1503,8 +1598,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress1, State: management.Node_LEADER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, }, nodeId2: { @@ -1512,8 +1608,9 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress2, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, }, nodeId3: { @@ -1521,19 +1618,20 @@ func TestCluster_GetCluster(t *testing.T) { BindAddress: bindAddress3, State: management.Node_FOLLOWER, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, }, }, } - actCluster3 := cluster3 + actCluster3 := resClusterInfo3.Cluster if !reflect.DeepEqual(expCluster3, actCluster3) { t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) } } -func TestCluster_SetState(t *testing.T) { +func TestCluster_Set(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1542,8 +1640,9 @@ func TestCluster_SetState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := 
fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -1556,8 +1655,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1582,10 +1682,14 @@ func TestCluster_SetState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -1598,8 +1702,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1624,10 +1729,14 @@ func TestCluster_SetState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -1640,8 +1749,9 @@ func TestCluster_SetState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1692,113 +1802,185 @@ func TestCluster_SetState(t *testing.T) { t.Fatalf("%v", err) } - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + 
t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := &management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: "test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { + if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to 
see %v, saw %v", expVal33, actVal33) } } -func TestCluster_GetState(t *testing.T) { +func TestCluster_Get(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -1807,8 +1989,9 @@ func TestCluster_GetState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -1821,8 +2004,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -1847,10 +2031,14 @@ func TestCluster_GetState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -1863,8 +2051,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -1889,10 +2078,14 @@ func TestCluster_GetState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -1905,8 +2098,9 @@ func TestCluster_GetState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -1957,113 +2151,185 @@ func TestCluster_GetState(t *testing.T) { t.Fatalf("%v", err) } - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { 
t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := &management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: "test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { + if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != 
nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } } -func TestCluster_DeleteState(t *testing.T) { +func TestCluster_Delete(t *testing.T) { curDir, _ := os.Getwd() logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) @@ -2072,8 +2338,9 @@ func TestCluster_DeleteState(t *testing.T) { peerGrpcAddress1 := "" grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId1 := "node-1" bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() defer func() { @@ -2086,8 +2353,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress1, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - HttpAddress: httpAddress1, + GrpcAddress: grpcAddress1, + GrpcGatewayAddress: grpcGatewayAddress1, + HttpAddress: httpAddress1, }, } @@ -2112,10 +2380,14 @@ func TestCluster_DeleteState(t *testing.T) { // start server server1.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress2 := grpcAddress1 grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId2 := "node-2" bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() defer func() { @@ -2128,8 +2400,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress2, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - HttpAddress: httpAddress2, + GrpcAddress: grpcAddress2, + GrpcGatewayAddress: grpcGatewayAddress2, + HttpAddress: httpAddress2, }, } @@ -2154,10 +2427,14 @@ func TestCluster_DeleteState(t *testing.T) { // start server server2.Start() + // sleep + time.Sleep(5 * time.Second) + peerGrpcAddress3 := grpcAddress1 grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + nodeId3 := "node-3" bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() defer func() { @@ -2170,8 +2447,9 @@ func TestCluster_DeleteState(t *testing.T) { BindAddress: bindAddress3, State: management.Node_UNKNOWN, Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - HttpAddress: httpAddress3, + GrpcAddress: grpcAddress3, + GrpcGatewayAddress: grpcGatewayAddress3, + HttpAddress: httpAddress3, }, } @@ -2222,215 +2500,275 @@ func TestCluster_DeleteState(t *testing.T) { t.Fatalf("%v", err) } - // set test data before delete - err = client1.Set("test/key1", "val1") + valueAny := &any.Any{} + err = protobuf.UnmarshalAny("val1", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + 
setReq1 := &management.SetRequest{ + Key: "test/key1", + Value: valueAny, + } + _, err = client1.Set(setReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err := client1.Get("test/key1") + getReq1 := &management.GetRequest{ + Key: "test/key1", + } + getRes11, err := client1.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val11, err := protobuf.MarshalAny(getRes11.Value) if err != nil { t.Fatalf("%v", err) } expVal11 := "val1" actVal11 := *val11.(*string) - if expVal11 != actVal11 { + if !cmp.Equal(expVal11, actVal11) { t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) } - val21, err := client2.Get("test/key1") + getRes21, err := client2.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val21, err := protobuf.MarshalAny(getRes21.Value) if err != nil { t.Fatalf("%v", err) } expVal21 := "val1" actVal21 := *val21.(*string) - if expVal21 != actVal21 { + if !cmp.Equal(expVal21, actVal21) { t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) } - val31, err := client3.Get("test/key1") + getRes31, err := client3.Get(getReq1) + if err != nil { + t.Fatalf("%v", err) + } + val31, err := protobuf.MarshalAny(getRes31.Value) if err != nil { t.Fatalf("%v", err) } expVal31 := "val1" actVal31 := *val31.(*string) - if expVal31 != actVal31 { + if !cmp.Equal(expVal31, actVal31) { t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) } - err = client2.Set("test/key2", "val2") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val2", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq2 := &management.SetRequest{ + Key: "test/key2", + Value: valueAny, + } + _, err = client2.Set(setReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err := client1.Get("test/key2") + getReq2 := &management.GetRequest{ + Key: "test/key2", + } + getRes12, err := client1.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val12, err := protobuf.MarshalAny(getRes12.Value) if err != nil { t.Fatalf("%v", err) } expVal12 := "val2" actVal12 := *val12.(*string) - if expVal12 != actVal12 { + if !cmp.Equal(expVal12, actVal12) { t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) } - val22, err := client2.Get("test/key2") + getRes22, err := client2.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val22, err := protobuf.MarshalAny(getRes22.Value) if err != nil { t.Fatalf("%v", err) } expVal22 := "val2" actVal22 := *val22.(*string) - if expVal22 != actVal22 { + if !cmp.Equal(expVal22, actVal22) { t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) } - val32, err := client3.Get("test/key2") + getRes32, err := client3.Get(getReq2) + if err != nil { + t.Fatalf("%v", err) + } + val32, err := protobuf.MarshalAny(getRes32.Value) if err != nil { t.Fatalf("%v", err) } expVal32 := "val2" actVal32 := *val32.(*string) - if expVal32 != actVal32 { + if !cmp.Equal(expVal32, actVal32) { t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) } - err = client3.Set("test/key3", "val3") + valueAny = &any.Any{} + err = protobuf.UnmarshalAny("val3", valueAny) + if err != nil { + t.Fatalf("%v", err) + } + setReq3 := &management.SetRequest{ + Key: "test/key3", + Value: valueAny, + } + _, err = client3.Set(setReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val13, err := 
client1.Get("test/key3") + getReq3 := &management.GetRequest{ + Key: "test/key3", + } + getRes13, err := client1.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val13, err := protobuf.MarshalAny(getRes13.Value) if err != nil { t.Fatalf("%v", err) } expVal13 := "val3" actVal13 := *val13.(*string) - if expVal13 != actVal13 { + if !cmp.Equal(expVal13, actVal13) { t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) } - val23, err := client2.Get("test/key3") + getRes23, err := client2.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val23, err := protobuf.MarshalAny(getRes23.Value) if err != nil { t.Fatalf("%v", err) } expVal23 := "val3" actVal23 := *val23.(*string) - if expVal23 != actVal23 { + if !cmp.Equal(expVal23, actVal23) { t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) } - val33, err := client3.Get("test/key3") + getRes33, err := client3.Get(getReq3) + if err != nil { + t.Fatalf("%v", err) + } + val33, err := protobuf.MarshalAny(getRes33.Value) if err != nil { t.Fatalf("%v", err) } expVal33 := "val3" actVal33 := *val33.(*string) - if expVal33 != actVal33 { + if !cmp.Equal(expVal33, actVal33) { t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) } // delete - err = client1.Delete("test/key1") + deleteReq1 := &management.DeleteRequest{ + Key: "test/key1", + } + _, err = client1.Delete(deleteReq1) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val11, err = client1.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes11, err = client1.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val11 != nil { + if getRes11.Value != nil { t.Fatalf("%v", err) } - val21, err = client2.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes21, err = client2.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val21 != nil { + if getRes21.Value != nil { t.Fatalf("%v", err) } - val31, err = client3.Get("test/key1") - if err != blasterrors.ErrNotFound { + getRes31, err = client3.Get(getReq1) + if err != nil { t.Fatalf("%v", err) } - if val31 != nil { + if getRes31.Value != nil { t.Fatalf("%v", err) } - err = client2.Delete("test/key2") + deleteReq2 := &management.DeleteRequest{ + Key: "test/key2", + } + _, err = client2.Delete(deleteReq2) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate // get value from all nodes - val12, err = client1.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes12, err = client1.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val12 != nil { + if getRes12.Value != nil { t.Fatalf("%v", err) } - val22, err = client2.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes22, err = client2.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val22 != nil { + if getRes22.Value != nil { t.Fatalf("%v", err) } - val32, err = client3.Get("test/key2") - if err != blasterrors.ErrNotFound { + getRes32, err = client3.Get(getReq2) + if err != nil { t.Fatalf("%v", err) } - if val32 != nil { + if getRes32.Value != nil { t.Fatalf("%v", err) } - err = client3.Delete("test/key3") + deleteReq3 := &management.DeleteRequest{ + Key: "test/key2", + } + _, err = client3.Delete(deleteReq3) if err != nil { t.Fatalf("%v", err) } time.Sleep(2 * time.Second) // wait for data to propagate - // get value from all nodes - val13, err = client1.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val13 != nil { - t.Fatalf("%v", 
err) - } - val23, err = client2.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val23 != nil { - t.Fatalf("%v", err) - } - val33, err = client3.Get("test/key3") - if err != blasterrors.ErrNotFound { - t.Fatalf("%v", err) - } - if val33 != nil { - t.Fatalf("%v", err) - } - // delete non-existing data from manager1 - err = client1.Delete("test/non-existing") - if err == nil { + deleteNonExistingReq := &management.DeleteRequest{ + Key: "test/non-existing", + } + _, err = client1.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data from manager2 - err = client2.Delete("test/non-existing") - if err == nil { + _, err = client2.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } // delete non-existing data from manager3 - err = client3.Delete("test/non-existing") - if err == nil { + _, err = client3.Delete(deleteNonExistingReq) + if err != nil { t.Fatalf("%v", err) } } diff --git a/maputils/maputils.go b/maputils/maputils.go index 36ffe67..a5922fd 100644 --- a/maputils/maputils.go +++ b/maputils/maputils.go @@ -173,7 +173,12 @@ func (m Map) Get(key string) (interface{}, error) { } } - return value, nil + switch value.(type) { + case Map: + return value.(Map).ToMap(), nil + default: + return value, nil + } } func (m Map) Delete(key string) error { diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go index 9fb2bcb..d71e400 100644 --- a/maputils/maputils_test.go +++ b/maputils/maputils_test.go @@ -516,8 +516,8 @@ func Test_Get(t *testing.T) { if err != nil { t.Fatalf("%v", err) } - exp2 := Map{ - "b": Map{ + exp2 := map[string]interface{}{ + "b": map[string]interface{}{ "c": "abc", "d": "abd", }, diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go index d37e934..a942d09 100644 --- a/protobuf/distribute/distribute.pb.go +++ b/protobuf/distribute/distribute.pb.go @@ -8,8 +8,12 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" index "github.com/mosuka/blast/protobuf/index" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +26,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +62,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -125,7 +135,7 @@ func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse struct { @@ -164,237 +174,323 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } -type GetDocumentRequest struct { +type GetRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } -func (m *GetDocumentRequest) String() string { return 
proto.CompactTextString(m) } -func (*GetDocumentRequest) ProtoMessage() {} -func (*GetDocumentRequest) Descriptor() ([]byte, []int) { +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{2} } -func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) } -func (m *GetDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentRequest.Merge(m, src) +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) } -func (m *GetDocumentRequest) XXX_Size() int { - return xxx_messageInfo_GetDocumentRequest.Size(m) +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) } -func (m *GetDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_GetRequest proto.InternalMessageInfo -func (m *GetDocumentRequest) GetId() string { +func (m *GetRequest) GetId() string { if m != nil { return m.Id } return "" } -type GetDocumentResponse struct { - Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type GetResponse struct { + Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } -func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*GetDocumentResponse) ProtoMessage() {} -func (*GetDocumentResponse) Descriptor() ([]byte, []int) { +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{3} } -func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) } -func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) } -func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentResponse.Merge(m, src) +func (m *GetResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetResponse.Merge(m, src) } -func (m *GetDocumentResponse) XXX_Size() int { - return xxx_messageInfo_GetDocumentResponse.Size(m) +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) } -func (m *GetDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_GetResponse proto.InternalMessageInfo -func (m *GetDocumentResponse) GetDocument() *index.Document { +func (m *GetResponse) GetFields() *any.Any { if m != nil { - return m.Document + return m.Fields } return nil } -type IndexDocumentRequest struct { - Document *index.Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type IndexRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } -func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentRequest) ProtoMessage() {} -func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { +func (m *IndexRequest) Reset() { *m = IndexRequest{} } +func (m *IndexRequest) String() string { return proto.CompactTextString(m) } +func (*IndexRequest) ProtoMessage() {} +func (*IndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{4} } -func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) +func (m *IndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) +func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentRequest.Merge(m, src) +func (m *IndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexRequest.Merge(m, src) } -func (m *IndexDocumentRequest) XXX_Size() int { - return xxx_messageInfo_IndexDocumentRequest.Size(m) +func (m *IndexRequest) XXX_Size() int { + return xxx_messageInfo_IndexRequest.Size(m) } -func (m *IndexDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) +func (m *IndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_IndexRequest proto.InternalMessageInfo + +func (m *IndexRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} -func (m *IndexDocumentRequest) GetDocument() *index.Document { +func (m *IndexRequest) GetFields() *any.Any { if m != nil { - return m.Document + return m.Fields } return nil } -type IndexDocumentResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +type DeleteRequest struct 
{ + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } -func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentResponse) ProtoMessage() {} -func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{5} } -func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type BulkIndexRequest struct { + Documents []*index.Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{6} +} + +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentResponse.Merge(m, src) +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) } -func (m *IndexDocumentResponse) XXX_Size() int { - return xxx_messageInfo_IndexDocumentResponse.Size(m) +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) } -func (m *IndexDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo -func (m *IndexDocumentResponse) GetCount() int32 { +func (m *BulkIndexRequest) GetDocuments() []*index.Document { + if m != nil { + return m.Documents + } + return nil +} 
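The generated code above renames the distribute document messages: `GetDocumentRequest`/`GetDocumentResponse` become `GetRequest`/`GetResponse` (the response now carries the fields as a `google.protobuf.Any` instead of an `index.Document`), `IndexDocumentRequest` becomes `IndexRequest` with an explicit `Id`, `DeleteDocumentRequest` becomes `DeleteRequest`, and a new `BulkIndexRequest` wraps a slice of `index.Document` values for the multi-document case. The sketch below is not part of the patch; it only illustrates the new request shapes, and the `distribute` import path is an assumption inferred from the file path of this hunk.

```go
package example

import (
	"github.com/golang/protobuf/ptypes/any"
	"github.com/mosuka/blast/protobuf/distribute" // assumed import path, inferred from protobuf/distribute/distribute.pb.go
	"github.com/mosuka/blast/protobuf/index"
)

// newDistributeRequests builds the renamed request messages from this hunk.
// fieldsAny is the document body already packed as a google.protobuf.Any;
// docs are prebuilt index.Document values for the bulk API.
func newDistributeRequests(id string, fieldsAny *any.Any, docs []*index.Document) (
	*distribute.GetRequest, *distribute.IndexRequest, *distribute.DeleteRequest, *distribute.BulkIndexRequest) {
	return &distribute.GetRequest{Id: id},
		&distribute.IndexRequest{Id: id, Fields: fieldsAny},
		&distribute.DeleteRequest{Id: id},
		&distribute.BulkIndexRequest{Documents: docs}
}
```

The generated nil-safe getters (`GetId`, `GetFields`, `GetDocuments`) can then be used on the receiving side exactly like the accessors defined above.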
+ +type BulkIndexResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{7} +} + +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) +} +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) +} +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) +} +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) +} +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo + +func (m *BulkIndexResponse) GetCount() int32 { if m != nil { return m.Count } return 0 } -type DeleteDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type BulkDeleteRequest struct { + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } -func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentRequest) ProtoMessage() {} -func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{6} +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{8} } -func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) } -func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) } -func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) } -func (m *DeleteDocumentRequest) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentRequest.Size(m) +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) } -func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentRequest 
proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo -func (m *DeleteDocumentRequest) GetId() string { +func (m *BulkDeleteRequest) GetIds() []string { if m != nil { - return m.Id + return m.Ids } - return "" + return nil } -type DeleteDocumentResponse struct { +type BulkDeleteResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } -func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentResponse) ProtoMessage() {} -func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{7} +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0b1b3e8a99d31c9c, []int{9} } -func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) } -func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) } -func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) } -func (m *DeleteDocumentResponse) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentResponse.Size(m) +func (m *BulkDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BulkDeleteResponse.Size(m) } -func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo -func (m *DeleteDocumentResponse) GetCount() int32 { +func (m *BulkDeleteResponse) GetCount() int32 { if m != nil { return m.Count } @@ -412,7 +508,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{8} + return fileDescriptor_0b1b3e8a99d31c9c, []int{10} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -451,7 +547,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{9} + return fileDescriptor_0b1b3e8a99d31c9c, []int{11} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -484,12 +580,14 @@ func init() { 
proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") - proto.RegisterType((*GetDocumentRequest)(nil), "distribute.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "distribute.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "distribute.IndexDocumentRequest") - proto.RegisterType((*IndexDocumentResponse)(nil), "distribute.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "distribute.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "distribute.DeleteDocumentResponse") + proto.RegisterType((*GetRequest)(nil), "distribute.GetRequest") + proto.RegisterType((*GetResponse)(nil), "distribute.GetResponse") + proto.RegisterType((*IndexRequest)(nil), "distribute.IndexRequest") + proto.RegisterType((*DeleteRequest)(nil), "distribute.DeleteRequest") + proto.RegisterType((*BulkIndexRequest)(nil), "distribute.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "distribute.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "distribute.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "distribute.BulkDeleteResponse") proto.RegisterType((*SearchRequest)(nil), "distribute.SearchRequest") proto.RegisterType((*SearchResponse)(nil), "distribute.SearchResponse") } @@ -499,42 +597,55 @@ func init() { } var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 556 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6f, 0xd3, 0x30, - 0x14, 0x5d, 0xba, 0x65, 0xb4, 0xb7, 0xeb, 0x87, 0x4c, 0x37, 0x58, 0x1e, 0x60, 0x98, 0x49, 0x14, - 0x4d, 0x73, 0xa5, 0x22, 0x1e, 0x10, 0x12, 0x28, 0x5b, 0x2a, 0x5a, 0x51, 0x95, 0x29, 0x1d, 0x88, - 0x01, 0xd2, 0x94, 0x34, 0xa6, 0x8d, 0xd6, 0xd6, 0x25, 0x76, 0x24, 0xf6, 0x2b, 0xf8, 0x27, 0x3c, - 0xf2, 0xfb, 0x50, 0xe2, 0x24, 0x24, 0x5d, 0xc8, 0xe0, 0xa5, 0xea, 0xbd, 0xf7, 0x9c, 0xe3, 0xe3, - 0xdc, 0x93, 0xc0, 0xe1, 0xca, 0x63, 0x82, 0xd9, 0xfe, 0xd7, 0x8e, 0xe3, 0x72, 0xe1, 0xb9, 0xb6, - 0x2f, 0x68, 0xea, 0x2f, 0x09, 0xc7, 0x08, 0xfe, 0x74, 0xb4, 0xfd, 0x29, 0x63, 0xd3, 0x39, 0xed, - 0x24, 0x44, 0x6b, 0x79, 0x2d, 0x61, 0x9a, 0x96, 0xf4, 0xdc, 0xa5, 0x43, 0xbf, 0xcb, 0x5f, 0x39, - 0xc3, 0x3f, 0x14, 0xd8, 0x1b, 0x31, 0x87, 0xf6, 0xa9, 0x35, 0x17, 0xb3, 0xd3, 0x19, 0x9d, 0x5c, - 0x99, 0xf4, 0x9b, 0x4f, 0xb9, 0x40, 0xaf, 0x40, 0x5d, 0x79, 0xcc, 0xa6, 0xf7, 0x95, 0x03, 0xa5, - 0x5d, 0xef, 0xb6, 0x49, 0xea, 0xfc, 0x7c, 0x0a, 0x39, 0x0b, 0xf0, 0xa6, 0xa4, 0xe1, 0xe7, 0xa0, - 0x86, 0x35, 0x6a, 0x40, 0xb5, 0xdf, 0xd3, 0x87, 0xe7, 0xfd, 0xc1, 0xa8, 0x37, 0x1e, 0x37, 0x37, - 0xd0, 0x0e, 0x94, 0x87, 0x83, 0x0f, 0xbd, 0xb0, 0x52, 0x50, 0x0d, 0x2a, 0x66, 0x4f, 0x37, 0xe4, - 0xb0, 0x84, 0x7f, 0x2a, 0x70, 0xef, 0x86, 0x3c, 0x5f, 0xb1, 0x25, 0xa7, 0xe8, 0x35, 0xa8, 0x5c, - 0x58, 0x22, 0xb6, 0xf4, 0xb4, 0xd0, 0x92, 0xe4, 0x90, 0x71, 0x40, 0x30, 0x25, 0x0f, 0x9b, 0xa0, - 0x86, 0x35, 0xaa, 0xc2, 0x1d, 0xe9, 0xe9, 0xa2, 0xb9, 0x11, 0x38, 0x78, 0x3f, 0x8a, 0x4b, 0x05, - 0x55, 0x40, 0xd5, 0x03, 0x7f, 0xcd, 0x12, 0x2a, 0xc3, 0x96, 0xd1, 0xd3, 0x8d, 0xe6, 0x66, 0xd0, - 0x0c, 0x5c, 0x5e, 0x34, 0xb7, 0x02, 0xf8, 0xe8, 0xdd, 0xf9, 0xa5, 0x2c, 0x55, 0x7c, 0x08, 0xe8, - 0x0d, 0x15, 0x06, 0x9b, 0xf8, 0x0b, 0xba, 0x14, 0xf1, 0xd3, 0xab, 0x43, 0xc9, 0x75, 
0x42, 0x9f, - 0x15, 0xb3, 0xe4, 0x3a, 0xf8, 0x04, 0xee, 0x66, 0x50, 0xd1, 0x8d, 0x8e, 0xa0, 0xec, 0x44, 0xbd, - 0x10, 0x5c, 0xed, 0x36, 0x88, 0xdc, 0x4f, 0x02, 0x4d, 0x00, 0xf8, 0x14, 0x5a, 0x83, 0x60, 0xb6, - 0x7e, 0xd6, 0x7f, 0x89, 0x1c, 0xc3, 0xee, 0x9a, 0x48, 0x64, 0xa5, 0x05, 0xea, 0x84, 0xf9, 0x91, - 0x84, 0x6a, 0xca, 0x02, 0x3f, 0x81, 0x5d, 0x83, 0xce, 0xa9, 0xa0, 0xb7, 0x5d, 0x90, 0xc0, 0xde, - 0x3a, 0xb0, 0x50, 0x78, 0x08, 0xb5, 0x31, 0xb5, 0xbc, 0xc9, 0x2c, 0x16, 0x7c, 0x09, 0x75, 0x1e, - 0x36, 0x2e, 0x3d, 0xd9, 0x89, 0xee, 0xd2, 0x22, 0x32, 0xda, 0x24, 0x8e, 0x31, 0xd1, 0x97, 0xd7, - 0x66, 0x8d, 0xa7, 0xc9, 0xf8, 0x2d, 0xd4, 0x63, 0xb5, 0xe8, 0xd4, 0x17, 0x50, 0x4b, 0xe4, 0xb8, - 0x3f, 0x2f, 0x56, 0xdb, 0x89, 0xd5, 0x02, 0x64, 0xf7, 0xd7, 0x26, 0x80, 0x91, 0x24, 0x0b, 0x7d, - 0x81, 0xc6, 0x5a, 0xb8, 0x10, 0xbe, 0xfd, 0x65, 0xd0, 0x1e, 0xff, 0x43, 0x3a, 0xf1, 0x06, 0x3a, - 0x83, 0x6a, 0x2a, 0x18, 0xe8, 0x41, 0x9a, 0x75, 0x33, 0x57, 0xda, 0xc3, 0xbf, 0xce, 0x13, 0xc5, - 0x8f, 0x50, 0xcb, 0x6c, 0x18, 0x1d, 0xa4, 0x39, 0x79, 0x09, 0xd2, 0x1e, 0x15, 0x20, 0x62, 0xdd, - 0xb6, 0x82, 0x3e, 0x43, 0x3d, 0xbb, 0x63, 0x94, 0x21, 0xe6, 0x06, 0x45, 0xc3, 0x45, 0x90, 0x94, - 0xb8, 0x0e, 0xdb, 0x72, 0x85, 0x68, 0x3f, 0xcd, 0xc8, 0x84, 0x44, 0xd3, 0xf2, 0x46, 0xb1, 0xc8, - 0xc9, 0xf1, 0xa7, 0xa3, 0xa9, 0x2b, 0x66, 0xbe, 0x4d, 0x26, 0x6c, 0xd1, 0x59, 0x30, 0xee, 0x5f, - 0x59, 0x1d, 0x7b, 0x6e, 0x71, 0xd1, 0xc9, 0xf9, 0xa0, 0xda, 0xdb, 0x61, 0xf3, 0xd9, 0xef, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xd2, 0x53, 0x25, 0x64, 0x6e, 0x05, 0x00, 0x00, + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x4e, 0xdb, 0x48, + 0x18, 0x5d, 0x27, 0x38, 0x90, 0x2f, 0x24, 0xf1, 0xce, 0x42, 0x00, 0x6f, 0xd8, 0x5d, 0x79, 0x77, + 0x25, 0xf0, 0x2e, 0xb6, 0x36, 0xdb, 0x9b, 0x82, 0xda, 0x2a, 0x34, 0x11, 0x20, 0xa2, 0x50, 0x39, + 0x40, 0x05, 0x52, 0x45, 0x9d, 0x78, 0x20, 0x56, 0x1c, 0x3b, 0x8d, 0xc7, 0x15, 0xa8, 0xea, 0x4d, + 0x5f, 0xa1, 0xb7, 0x7d, 0x93, 0x5e, 0xf6, 0x11, 0xfa, 0x0a, 0x7d, 0x90, 0x6a, 0x66, 0xec, 0xc4, + 0x26, 0x3f, 0xe5, 0x06, 0xf9, 0xfb, 0x3b, 0xe7, 0xcc, 0x7c, 0x67, 0x08, 0xfc, 0x35, 0x18, 0x7a, + 0xc4, 0x6b, 0x07, 0xd7, 0xba, 0x65, 0xfb, 0x64, 0x68, 0xb7, 0x03, 0x82, 0x63, 0x9f, 0x1a, 0x2b, + 0x23, 0x18, 0x67, 0xe4, 0x8d, 0x1b, 0xcf, 0xbb, 0x71, 0xb0, 0x3e, 0x1a, 0x34, 0xdd, 0x3b, 0xde, + 0x26, 0xff, 0x7a, 0xbf, 0x84, 0xfb, 0x03, 0x12, 0x15, 0xe5, 0x51, 0xd6, 0x76, 0x2d, 0x7c, 0xcb, + 0xff, 0x86, 0xb5, 0x72, 0x38, 0x68, 0x0e, 0x6c, 0xdd, 0x74, 0x5d, 0x8f, 0x98, 0xc4, 0xf6, 0x5c, + 0x9f, 0x57, 0x95, 0x4f, 0x02, 0x94, 0x9a, 0x9e, 0x85, 0x0f, 0xb1, 0xe9, 0x90, 0xee, 0xf3, 0x2e, + 0xee, 0xf4, 0x0c, 0xfc, 0x26, 0xc0, 0x3e, 0x41, 0x4f, 0x41, 0x1c, 0x0c, 0xbd, 0x36, 0x5e, 0x17, + 0xfe, 0x10, 0xb6, 0x0a, 0x95, 0x2d, 0x2d, 0x26, 0x7d, 0xfa, 0x88, 0xf6, 0x82, 0xf6, 0x1b, 0x7c, + 0x4c, 0xd9, 0x07, 0x91, 0xc5, 0x28, 0x07, 0x8b, 0x67, 0xcd, 0xe3, 0xe6, 0xc9, 0xcb, 0xa6, 0xf4, + 0x13, 0x2a, 0x42, 0xee, 0xb0, 0x5e, 0x6d, 0x9c, 0x1e, 0x1e, 0x35, 0xeb, 0xad, 0x96, 0x24, 0xa0, + 0x65, 0x58, 0x6a, 0x1c, 0x9d, 0xd7, 0x59, 0x94, 0x42, 0x79, 0xc8, 0x1a, 0xf5, 0x6a, 0x8d, 0x17, + 0xd3, 0xca, 0x67, 0x01, 0xd6, 0x26, 0xb8, 0xfc, 0x81, 0xe7, 0xfa, 0x18, 0x3d, 0x03, 0xd1, 0x27, + 0x26, 0x89, 0xf4, 0x6d, 0xcf, 0xd5, 0xc7, 0x67, 0xb4, 0x16, 0x1d, 0x30, 0xf8, 0x9c, 0x72, 0x05, + 0x22, 0x8b, 0x93, 0x02, 0x73, 0xb0, 0xc8, 0x05, 0x5e, 0x48, 0x02, 0x95, 0x73, 0xd6, 0x8c, 0xc2, + 0x14, 0xca, 0x82, 0x58, 0xa5, 0x62, 0xa5, 0x34, 0x5a, 0x82, 0x85, 0x5a, 0xbd, 0x5a, 
0x93, 0x16, + 0x68, 0x92, 0x4a, 0xbe, 0x90, 0x44, 0xda, 0xde, 0x3c, 0x39, 0xbd, 0xe2, 0x61, 0x46, 0x29, 0x03, + 0x1c, 0x60, 0x12, 0xdd, 0x67, 0x01, 0x52, 0xb6, 0xc5, 0xc4, 0x66, 0x8d, 0x94, 0x6d, 0x29, 0x7b, + 0x90, 0x63, 0xd5, 0xf0, 0x38, 0xff, 0x42, 0xe6, 0xda, 0xc6, 0x8e, 0xe5, 0xb3, 0x96, 0x5c, 0x65, + 0x45, 0xe3, 0x8b, 0xd3, 0xa2, 0xdd, 0x6a, 0x55, 0xf7, 0xce, 0x08, 0x7b, 0x94, 0x06, 0x2c, 0x1f, + 0xd1, 0x25, 0xcf, 0x00, 0x8f, 0xa1, 0xa5, 0x1e, 0x80, 0xf6, 0x3b, 0xe4, 0x6b, 0xd8, 0xc1, 0x04, + 0xcf, 0xd2, 0x5a, 0x05, 0x69, 0x3f, 0x70, 0x7a, 0x09, 0xca, 0x1d, 0xc8, 0x5a, 0x5e, 0x27, 0xe8, + 0x63, 0x97, 0x50, 0xcd, 0xe9, 0xad, 0x5c, 0xa5, 0xa8, 0x71, 0xe7, 0xd5, 0xc2, 0xbc, 0x31, 0xee, + 0x50, 0xb6, 0xe1, 0xe7, 0x18, 0x44, 0x78, 0xe8, 0x15, 0x10, 0x3b, 0x5e, 0xe0, 0x12, 0x46, 0x25, + 0x1a, 0x3c, 0x50, 0xfe, 0xe6, 0xad, 0x49, 0x49, 0x12, 0xa4, 0x6d, 0x8b, 0x13, 0x65, 0x0d, 0xfa, + 0xa9, 0xa8, 0x80, 0xe2, 0x6d, 0x73, 0x21, 0x1b, 0x90, 0x6f, 0x61, 0x73, 0xd8, 0xe9, 0x46, 0x70, + 0x7b, 0x50, 0xf0, 0x59, 0xe2, 0x6a, 0xc8, 0x33, 0x73, 0xaf, 0x3d, 0xef, 0xc7, 0x87, 0x95, 0x63, + 0x28, 0x44, 0x68, 0x21, 0xeb, 0x63, 0xc8, 0x8f, 0xe0, 0xfc, 0xc0, 0x99, 0x8f, 0xb6, 0x1c, 0xa1, + 0xd1, 0xce, 0xca, 0x17, 0x11, 0xa0, 0x36, 0xb2, 0x2e, 0xba, 0x85, 0xe2, 0x3d, 0xf7, 0x22, 0xe5, + 0xc7, 0x4f, 0x4f, 0xfe, 0xf3, 0x01, 0xf6, 0x57, 0xca, 0x1f, 0xbe, 0x7e, 0xfb, 0x98, 0x2a, 0xa1, + 0x15, 0xfd, 0xed, 0x7f, 0xba, 0xeb, 0x59, 0x58, 0xef, 0xb2, 0xae, 0x0e, 0xa3, 0x39, 0x83, 0xf4, + 0x01, 0x26, 0xa8, 0x14, 0x47, 0x1a, 0xfb, 0x57, 0x5e, 0x9b, 0xc8, 0x87, 0xa8, 0x9b, 0x0c, 0x75, + 0x0d, 0xad, 0x52, 0xd4, 0xd1, 0xc2, 0xf5, 0x77, 0xb6, 0xf5, 0x44, 0x55, 0xdf, 0x23, 0x0f, 0x44, + 0xb6, 0x74, 0xb4, 0x1e, 0x07, 0x88, 0x5b, 0x49, 0x2e, 0x4d, 0x5c, 0x53, 0x9d, 0xfe, 0x77, 0x53, + 0x1e, 0x31, 0x64, 0x4d, 0xce, 0x27, 0x90, 0x77, 0x05, 0xf5, 0x52, 0x96, 0xa7, 0xb3, 0xed, 0x0a, + 0x2a, 0xba, 0x84, 0x0c, 0xf7, 0x04, 0xda, 0x88, 0x33, 0x26, 0xec, 0x34, 0x93, 0x32, 0x3c, 0x8c, + 0x3a, 0xe3, 0x30, 0xaf, 0x20, 0x3b, 0x72, 0x31, 0x2a, 0xc7, 0xe1, 0xef, 0xbf, 0x0f, 0x79, 0x73, + 0x46, 0x35, 0xbc, 0xb5, 0x5f, 0x18, 0x51, 0x5e, 0x5e, 0xa2, 0x44, 0xed, 0xc0, 0xe9, 0x51, 0xe9, + 0xaf, 0x01, 0xc6, 0x96, 0x46, 0x13, 0x08, 0xc9, 0x23, 0xfc, 0x36, 0xab, 0x9c, 0x64, 0x50, 0x13, + 0x0c, 0xe7, 0x90, 0xe1, 0xd6, 0x4d, 0x5e, 0x4e, 0xe2, 0x71, 0xc8, 0xf2, 0xb4, 0x52, 0x88, 0xba, + 0xca, 0x50, 0x8b, 0x0a, 0x50, 0x54, 0x6e, 0xe4, 0x5d, 0x41, 0xdd, 0xdf, 0xb9, 0xfc, 0xe7, 0xc6, + 0x26, 0xdd, 0xa0, 0xad, 0x75, 0xbc, 0xbe, 0xde, 0xf7, 0xfc, 0xa0, 0x67, 0xea, 0x6d, 0xc7, 0xf4, + 0x89, 0x3e, 0xe5, 0x67, 0xb0, 0x9d, 0x61, 0xc9, 0xff, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0xf4, 0x78, 0x1a, 0x24, 0x07, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -550,9 +661,11 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type DistributeClient interface {
	NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error)
-	GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error)
-	IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error)
-	DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error)
+	Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
+	Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+	BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error)
+	BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error)
	Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error)
}

@@ -573,81 +686,49 @@ func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCh
	return out, nil
}

-func (c *distributeClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) {
-	out := new(GetDocumentResponse)
-	err := c.cc.Invoke(ctx, "/distribute.Distribute/GetDocument", in, out, opts...)
+func (c *distributeClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
+	out := new(GetResponse)
+	err := c.cc.Invoke(ctx, "/distribute.Distribute/Get", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

-func (c *distributeClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[0], "/distribute.Distribute/IndexDocument", opts...)
+func (c *distributeClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/distribute.Distribute/Index", in, out, opts...)
	if err != nil {
		return nil, err
	}
-	x := &distributeIndexDocumentClient{stream}
-	return x, nil
-}
-
-type Distribute_IndexDocumentClient interface {
-	Send(*IndexDocumentRequest) error
-	CloseAndRecv() (*IndexDocumentResponse, error)
-	grpc.ClientStream
-}
-
-type distributeIndexDocumentClient struct {
-	grpc.ClientStream
-}
-
-func (x *distributeIndexDocumentClient) Send(m *IndexDocumentRequest) error {
-	return x.ClientStream.SendMsg(m)
+	return out, nil
}

-func (x *distributeIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) {
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	m := new(IndexDocumentResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
+func (c *distributeClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/distribute.Distribute/Delete", in, out, opts...)
+	if err != nil {
		return nil, err
	}
-	return m, nil
+	return out, nil
}

-func (c *distributeClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Distribute_serviceDesc.Streams[1], "/distribute.Distribute/DeleteDocument", opts...)
+func (c *distributeClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkIndex", in, out, opts...) if err != nil { return nil, err } - x := &distributeDeleteDocumentClient{stream} - return x, nil -} - -type Distribute_DeleteDocumentClient interface { - Send(*DeleteDocumentRequest) error - CloseAndRecv() (*DeleteDocumentResponse, error) - grpc.ClientStream -} - -type distributeDeleteDocumentClient struct { - grpc.ClientStream -} - -func (x *distributeDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *distributeDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(DeleteDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *distributeClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkDelete", in, out, opts...) + if err != nil { return nil, err } - return m, nil + return out, nil } func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { @@ -662,12 +743,40 @@ func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts . // DistributeServer is the server API for Distribute service. type DistributeServer interface { NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - IndexDocument(Distribute_IndexDocumentServer) error - DeleteDocument(Distribute_DeleteDocumentServer) error + Get(context.Context, *GetRequest) (*GetResponse, error) + Index(context.Context, *IndexRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) Search(context.Context, *SearchRequest) (*SearchResponse, error) } +// UnimplementedDistributeServer can be embedded to have forward compatible implementations. 
+type UnimplementedDistributeServer struct { +} + +func (*UnimplementedDistributeServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedDistributeServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedDistributeServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") +} +func (*UnimplementedDistributeServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedDistributeServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedDistributeServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedDistributeServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} + func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { s.RegisterService(&_Distribute_serviceDesc, srv) } @@ -690,74 +799,94 @@ func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } -func _Distribute_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocumentRequest) +func _Distribute_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(DistributeServer).GetDocument(ctx, in) + return srv.(DistributeServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/distribute.Distribute/GetDocument", + FullMethod: "/distribute.Distribute/Get", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).GetDocument(ctx, req.(*GetDocumentRequest)) + return srv.(DistributeServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } -func _Distribute_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DistributeServer).IndexDocument(&distributeIndexDocumentServer{stream}) -} - -type Distribute_IndexDocumentServer interface { - SendAndClose(*IndexDocumentResponse) error - Recv() (*IndexDocumentRequest, error) - grpc.ServerStream -} - -type distributeIndexDocumentServer struct { - grpc.ServerStream -} - -func (x *distributeIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *distributeIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { - m := new(IndexDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Distribute_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IndexRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil -} - -func _Distribute_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DistributeServer).DeleteDocument(&distributeDeleteDocumentServer{stream}) -} - -type Distribute_DeleteDocumentServer interface { - SendAndClose(*DeleteDocumentResponse) error - Recv() (*DeleteDocumentRequest, error) - grpc.ServerStream + if interceptor == nil { + return srv.(DistributeServer).Index(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/Index", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).Index(ctx, req.(*IndexRequest)) + } + return interceptor(ctx, in, info, handler) } -type distributeDeleteDocumentServer struct { - grpc.ServerStream +func _Distribute_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *distributeDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { - return x.ServerStream.SendMsg(m) +func _Distribute_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DistributeServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/BulkIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *distributeDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { - m := new(DeleteDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Distribute_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil + if interceptor == nil { + return srv.(DistributeServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/distribute.Distribute/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DistributeServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) } func _Distribute_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -787,25 +916,30 @@ var _Distribute_serviceDesc = grpc.ServiceDesc{ Handler: _Distribute_NodeHealthCheck_Handler, }, { - MethodName: "GetDocument", - Handler: _Distribute_GetDocument_Handler, + MethodName: "Get", + Handler: _Distribute_Get_Handler, }, { 
- MethodName: "Search", - Handler: _Distribute_Search_Handler, + MethodName: "Index", + Handler: _Distribute_Index_Handler, + }, + { + MethodName: "Delete", + Handler: _Distribute_Delete_Handler, }, - }, - Streams: []grpc.StreamDesc{ { - StreamName: "IndexDocument", - Handler: _Distribute_IndexDocument_Handler, - ClientStreams: true, + MethodName: "BulkIndex", + Handler: _Distribute_BulkIndex_Handler, }, { - StreamName: "DeleteDocument", - Handler: _Distribute_DeleteDocument_Handler, - ClientStreams: true, + MethodName: "BulkDelete", + Handler: _Distribute_BulkDelete_Handler, + }, + { + MethodName: "Search", + Handler: _Distribute_Search_Handler, }, }, + Streams: []grpc.StreamDesc{}, Metadata: "protobuf/distribute/distribute.proto", } diff --git a/protobuf/distribute/distribute.pb.gw.go b/protobuf/distribute/distribute.pb.gw.go new file mode 100644 index 0000000..e540253 --- /dev/null +++ b/protobuf/distribute/distribute.pb.gw.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/distribute/distribute.proto + +/* +Package distribute is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package distribute + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Distribute_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Distribute_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Distribute_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Get_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Index_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + 
var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Index_1(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Distribute_Search_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterDistributeHandlerFromEndpoint is same as RegisterDistributeHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterDistributeHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterDistributeHandler(ctx, mux, conn) +} + +// RegisterDistributeHandler registers the http handlers for service Distribute to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterDistributeHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterDistributeHandlerClient(ctx, mux, NewDistributeClient(conn)) +} + +// RegisterDistributeHandlerClient registers the http handlers for service Distribute +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DistributeClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DistributeClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "DistributeClient" to call the correct interceptors. 
+func RegisterDistributeHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DistributeClient) error { + + mux.Handle("GET", pattern_Distribute_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Distribute_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Distribute_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Index_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Distribute_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Index_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Distribute_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Distribute_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Distribute_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Distribute_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Distribute_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Distribute_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Distribute_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Distribute_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Distribute_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Distribute_Get_0 = runtime.ForwardResponseMessage + + forward_Distribute_Index_0 = runtime.ForwardResponseMessage + + forward_Distribute_Index_1 = runtime.ForwardResponseMessage + + forward_Distribute_Delete_0 = runtime.ForwardResponseMessage + + forward_Distribute_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Distribute_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Distribute_Search_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto index 53d49d5..beaf5a6 100644 --- a/protobuf/distribute/distribute.proto +++ b/protobuf/distribute/distribute.proto @@ -15,63 +15,114 @@ syntax = "proto3"; import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; import "protobuf/index/index.proto"; +import "google/api/annotations.proto"; package distribute; option go_package = "github.com/mosuka/blast/protobuf/distribute"; service Distribute { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } - rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} - rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} - rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} - rpc Search (SearchRequest) returns (SearchResponse) {} + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Index (IndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + additional_bindings { + put: "/v1/documents/{id=**}" + body: "*" + } + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option 
(google.api.http) = { + put: "/v1/bulk" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/bulk" + body: "*" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } } message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } -message GetDocumentRequest { +message GetRequest { string id = 1; } -message GetDocumentResponse { - index.Document document = 1; +message GetResponse { + google.protobuf.Any fields = 1; } -message IndexDocumentRequest { - index.Document document = 1; +message IndexRequest { + string id = 1; + google.protobuf.Any fields = 2; +} + +message DeleteRequest { + string id = 1; } -message IndexDocumentResponse { +message BulkIndexRequest { + repeated index.Document documents = 1; +} + +message BulkIndexResponse { int32 count = 1; } -message DeleteDocumentRequest { - string id = 1; +message BulkDeleteRequest { + repeated string ids = 1; } -message DeleteDocumentResponse { +message BulkDeleteResponse { int32 count = 1; } diff --git a/protobuf/index/index.go b/protobuf/index/index.go index fd80b99..31a3023 100644 --- a/protobuf/index/index.go +++ b/protobuf/index/index.go @@ -1,3 +1,17 @@ +// Copyright (c) 2019 Minoru Osuka +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package index import ( @@ -5,7 +19,6 @@ import ( "errors" "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/protobuf" ) diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index f37f30a..b60dbee 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -9,7 +9,10 @@ import ( proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +25,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +61,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -154,6 +163,46 @@ func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{9, 0} } +type Proposal_Event int32 + +const ( + Proposal_UNKNOWN Proposal_Event = 0 + Proposal_SET_NODE Proposal_Event = 1 + Proposal_DELETE_NODE Proposal_Event = 2 + Proposal_INDEX Proposal_Event = 3 + Proposal_DELETE Proposal_Event = 4 + Proposal_BULK_INDEX Proposal_Event = 5 + Proposal_BULK_DELETE Proposal_Event = 6 +) + +var Proposal_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_NODE", + 2: "DELETE_NODE", + 3: "INDEX", + 4: "DELETE", + 5: "BULK_INDEX", + 6: "BULK_DELETE", +} + +var Proposal_Event_value = map[string]int32{ + "UNKNOWN": 0, + "SET_NODE": 1, + "DELETE_NODE": 2, + "INDEX": 3, + "DELETE": 4, + "BULK_INDEX": 5, + "BULK_DELETE": 6, +} + +func (x Proposal_Event) String() 
string { + return proto.EnumName(Proposal_Event_name, int32(x)) +} + +func (Proposal_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{24, 0} +} + type NodeHealthCheckRequest struct { Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -190,7 +239,7 @@ func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse struct { @@ -229,12 +278,13 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` + HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -272,6 +322,13 @@ func (m *Metadata) GetGrpcAddress() string { return "" } +func (m *Metadata) GetGrpcGatewayAddress() string { + if m != nil { + return m.GrpcGatewayAddress + } + return "" +} + func (m *Metadata) GetHttpAddress() string { if m != nil { return m.HttpAddress @@ -592,281 +649,368 @@ func (m *ClusterWatchResponse) GetCluster() *Cluster { return nil } -type Document struct { +type GetRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{10} } -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) } -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) } -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) } -func (m *Document) 
XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) } -var xxx_messageInfo_Document proto.InternalMessageInfo +var xxx_messageInfo_GetRequest proto.InternalMessageInfo -func (m *Document) GetId() string { +func (m *GetRequest) GetId() string { if m != nil { return m.Id } return "" } -func (m *Document) GetFields() *any.Any { +type GetResponse struct { + // Document document = 1; + Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{11} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetFields() *any.Any { if m != nil { return m.Fields } return nil } -type GetDocumentRequest struct { +type IndexRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } -func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocumentRequest) ProtoMessage() {} -func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} +func (m *IndexRequest) Reset() { *m = IndexRequest{} } +func (m *IndexRequest) String() string { return proto.CompactTextString(m) } +func (*IndexRequest) ProtoMessage() {} +func (*IndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{12} } -func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentRequest.Unmarshal(m, b) +func (m *IndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexRequest.Unmarshal(m, b) } -func (m *GetDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentRequest.Marshal(b, m, deterministic) +func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentRequest.Merge(m, src) +func (m *IndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexRequest.Merge(m, src) } -func (m *GetDocumentRequest) XXX_Size() int { - return xxx_messageInfo_GetDocumentRequest.Size(m) +func (m *IndexRequest) XXX_Size() int { + return xxx_messageInfo_IndexRequest.Size(m) } -func (m 
*GetDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentRequest.DiscardUnknown(m) +func (m *IndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_IndexRequest proto.InternalMessageInfo -func (m *GetDocumentRequest) GetId() string { +func (m *IndexRequest) GetId() string { if m != nil { return m.Id } return "" } -type GetDocumentResponse struct { - Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *IndexRequest) GetFields() *any.Any { + if m != nil { + return m.Fields + } + return nil } -func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } -func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*GetDocumentResponse) ProtoMessage() {} -func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} +type DeleteRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDocumentResponse.Unmarshal(m, b) +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{13} } -func (m *GetDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDocumentResponse.Marshal(b, m, deterministic) + +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) } -func (m *GetDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocumentResponse.Merge(m, src) +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) } -func (m *GetDocumentResponse) XXX_Size() int { - return xxx_messageInfo_GetDocumentResponse.Size(m) +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) } -func (m *GetDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocumentResponse.DiscardUnknown(m) +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo -func (m *GetDocumentResponse) GetDocument() *Document { +func (m *DeleteRequest) GetId() string { if m != nil { - return m.Document + return m.Id + } + return "" +} + +type Document struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func 
(*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{14} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Document) GetFields() *any.Any { + if m != nil { + return m.Fields } return nil } -type IndexDocumentRequest struct { - Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type BulkIndexRequest struct { + Documents []*Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } -func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentRequest) ProtoMessage() {} -func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{15} } -func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentRequest.Unmarshal(m, b) +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) } -func (m *IndexDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) } -func (m *IndexDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentRequest.Merge(m, src) +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) } -func (m *IndexDocumentRequest) XXX_Size() int { - return xxx_messageInfo_IndexDocumentRequest.Size(m) +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) } -func (m *IndexDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentRequest.DiscardUnknown(m) +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo -func (m *IndexDocumentRequest) GetDocument() *Document { +func (m *BulkIndexRequest) GetDocuments() []*Document { if m != nil { - return m.Document + return m.Documents } return nil } -type IndexDocumentResponse struct { +type 
BulkIndexResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } -func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*IndexDocumentResponse) ProtoMessage() {} -func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{14} +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{16} } -func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexDocumentResponse.Unmarshal(m, b) +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) } -func (m *IndexDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) } -func (m *IndexDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexDocumentResponse.Merge(m, src) +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) } -func (m *IndexDocumentResponse) XXX_Size() int { - return xxx_messageInfo_IndexDocumentResponse.Size(m) +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) } -func (m *IndexDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_IndexDocumentResponse.DiscardUnknown(m) +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) } -var xxx_messageInfo_IndexDocumentResponse proto.InternalMessageInfo +var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo -func (m *IndexDocumentResponse) GetCount() int32 { +func (m *BulkIndexResponse) GetCount() int32 { if m != nil { return m.Count } return 0 } -type DeleteDocumentRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type BulkDeleteRequest struct { + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } -func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentRequest) ProtoMessage() {} -func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{17} } -func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentRequest.Unmarshal(m, b) +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) } -func (m *DeleteDocumentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentRequest.Marshal(b, m, deterministic) +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) } -func (m *DeleteDocumentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentRequest.Merge(m, src) +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) } -func (m *DeleteDocumentRequest) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentRequest.Size(m) +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) } -func (m *DeleteDocumentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentRequest.DiscardUnknown(m) +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentRequest proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo -func (m *DeleteDocumentRequest) GetId() string { +func (m *BulkDeleteRequest) GetIds() []string { if m != nil { - return m.Id + return m.Ids } - return "" + return nil } -type DeleteDocumentResponse struct { +type BulkDeleteResponse struct { Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } -func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteDocumentResponse) ProtoMessage() {} -func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{18} } -func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteDocumentResponse.Unmarshal(m, b) +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) } -func (m *DeleteDocumentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteDocumentResponse.Marshal(b, m, deterministic) +func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) } -func (m *DeleteDocumentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteDocumentResponse.Merge(m, src) +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) } -func (m *DeleteDocumentResponse) XXX_Size() int { - return xxx_messageInfo_DeleteDocumentResponse.Size(m) +func (m *BulkDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BulkDeleteResponse.Size(m) } -func (m *DeleteDocumentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteDocumentResponse.DiscardUnknown(m) +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteDocumentResponse 
proto.InternalMessageInfo +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo -func (m *DeleteDocumentResponse) GetCount() int32 { +func (m *BulkDeleteResponse) GetCount() int32 { if m != nil { return m.Count } @@ -884,7 +1028,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -923,7 +1067,7 @@ func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -964,7 +1108,7 @@ func (m *IndexConfig) Reset() { *m = IndexConfig{} } func (m *IndexConfig) String() string { return proto.CompactTextString(m) } func (*IndexConfig) ProtoMessage() {} func (*IndexConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} + return fileDescriptor_7b2daf652facb3ae, []int{21} } func (m *IndexConfig) XXX_Unmarshal(b []byte) error { @@ -1017,7 +1161,7 @@ func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{20} + return fileDescriptor_7b2daf652facb3ae, []int{22} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -1056,7 +1200,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{21} + return fileDescriptor_7b2daf652facb3ae, []int{23} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -1084,11 +1228,91 @@ func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { return nil } +type Proposal struct { + Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.Proposal_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Document *Document `protobuf:"bytes,3,opt,name=document,proto3" json:"document,omitempty"` + Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` + Documents []*Document `protobuf:"bytes,5,rep,name=documents,proto3" json:"documents,omitempty"` + Ids []string `protobuf:"bytes,6,rep,name=ids,proto3" json:"ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{24} +} + +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Proposal.Unmarshal(m, b) +} +func (m *Proposal) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return xxx_messageInfo_Proposal.Size(m) +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetEvent() Proposal_Event { + if m != nil { + return m.Event + } + return Proposal_UNKNOWN +} + +func (m *Proposal) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *Proposal) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *Proposal) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Proposal) GetDocuments() []*Document { + if m != nil { + return m.Documents + } + return nil +} + +func (m *Proposal) GetIds() []string { + if m != nil { + return m.Ids + } + return nil +} + func init() { proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) + proto.RegisterEnum("index.Proposal_Event", Proposal_Event_name, Proposal_Event_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") proto.RegisterType((*Metadata)(nil), "index.Metadata") @@ -1100,96 +1324,118 @@ func init() { proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") + proto.RegisterType((*GetRequest)(nil), "index.GetRequest") + proto.RegisterType((*GetResponse)(nil), "index.GetResponse") + proto.RegisterType((*IndexRequest)(nil), "index.IndexRequest") + proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") proto.RegisterType((*Document)(nil), "index.Document") - proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") - proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") - proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") - proto.RegisterType((*IndexDocumentResponse)(nil), "index.IndexDocumentResponse") - proto.RegisterType((*DeleteDocumentRequest)(nil), "index.DeleteDocumentRequest") - proto.RegisterType((*DeleteDocumentResponse)(nil), "index.DeleteDocumentResponse") + proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") + 
proto.RegisterType((*Proposal)(nil), "index.Proposal") } func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x5d, 0x73, 0xda, 0x46, - 0x14, 0x45, 0x80, 0x6c, 0x7c, 0x65, 0x08, 0xdd, 0xd8, 0x4e, 0x42, 0xe2, 0x36, 0xd9, 0xa6, 0x0d, - 0x33, 0x49, 0xa1, 0xe3, 0x8c, 0x27, 0x4d, 0xda, 0x4e, 0x07, 0x83, 0x62, 0x13, 0x13, 0xe1, 0x0a, - 0x3b, 0x9e, 0xf4, 0xc5, 0x23, 0xd0, 0x1a, 0x34, 0x06, 0x49, 0x45, 0x8b, 0xa7, 0x3c, 0xf6, 0xb5, - 0xef, 0xfd, 0x0f, 0xed, 0xcf, 0xe9, 0x7b, 0x7f, 0x4c, 0x67, 0x3f, 0x24, 0x4b, 0x32, 0x90, 0xe9, - 0x8b, 0xc7, 0x7b, 0xef, 0xb9, 0x67, 0xcf, 0xbd, 0xbb, 0x7b, 0x64, 0x43, 0xc5, 0x9f, 0x7a, 0xd4, - 0xeb, 0xcf, 0x2e, 0xeb, 0x8e, 0x6b, 0x93, 0xdf, 0xc4, 0xcf, 0x1a, 0x0f, 0x22, 0x95, 0x2f, 0x2a, - 0x0f, 0x86, 0x9e, 0x37, 0x1c, 0x93, 0x7a, 0x84, 0xb4, 0xdc, 0xb9, 0x40, 0x54, 0x1e, 0xa6, 0x53, - 0x64, 0xe2, 0x53, 0x99, 0xc4, 0x7f, 0x28, 0xb0, 0x63, 0x78, 0x36, 0x39, 0x22, 0xd6, 0x98, 0x8e, - 0x9a, 0x23, 0x32, 0xb8, 0x32, 0xc9, 0xaf, 0x33, 0x12, 0x50, 0xf4, 0x1a, 0x54, 0x7f, 0xea, 0xf5, - 0xc9, 0x7d, 0xe5, 0xb1, 0x52, 0x2d, 0xed, 0x7d, 0x59, 0x13, 0xdb, 0x2e, 0x46, 0xd7, 0x4e, 0x18, - 0xd4, 0x14, 0x15, 0x78, 0x1f, 0x54, 0xbe, 0x46, 0x77, 0x40, 0x3b, 0xd2, 0x1b, 0x9d, 0xd3, 0xa3, - 0xb6, 0xa1, 0xf7, 0x7a, 0xe5, 0x0c, 0xda, 0x84, 0x42, 0xa7, 0xfd, 0x41, 0xe7, 0x2b, 0x05, 0x15, - 0x61, 0xc3, 0xd4, 0x1b, 0x2d, 0x91, 0xcc, 0xe2, 0xbf, 0x15, 0xb8, 0x77, 0x8b, 0x3e, 0xf0, 0x3d, - 0x37, 0x20, 0xe8, 0x0d, 0xa8, 0x01, 0xb5, 0x68, 0xa8, 0xe6, 0xe9, 0x32, 0x35, 0x02, 0x5e, 0xeb, - 0x31, 0xac, 0x29, 0x4a, 0xb0, 0x09, 0x2a, 0x5f, 0x23, 0x0d, 0xd6, 0x85, 0x9c, 0x8f, 0xe5, 0x0c, - 0xdb, 0xfc, 0xcc, 0x08, 0x97, 0x0a, 0xda, 0x00, 0xb5, 0xc1, 0xa4, 0x95, 0xb3, 0xa8, 0x00, 0xf9, - 0x96, 0xde, 0x68, 0x95, 0x73, 0x2c, 0xc8, 0x04, 0x7e, 0x2c, 0xe7, 0x19, 0xdc, 0xe8, 0x9e, 0x5e, - 0x88, 0xa5, 0x8a, 0x4f, 0xa0, 0xf0, 0x9e, 0x50, 0xcb, 0xb6, 0xa8, 0x85, 0x9e, 0xc0, 0xe6, 0x70, - 0xea, 0x0f, 0x2e, 0x2c, 0xdb, 0x9e, 0x92, 0x20, 0xe0, 0x12, 0x37, 0x4c, 0x8d, 0xc5, 0x1a, 0x22, - 0xc4, 0x20, 0x23, 0x4a, 0xfd, 0x08, 0x92, 0x15, 0x10, 0x16, 0x93, 0x10, 0xfc, 0xaf, 0x02, 0x79, - 0xd6, 0x0e, 0x2a, 0x41, 0xd6, 0xb1, 0x25, 0x49, 0xd6, 0xb1, 0x59, 0x6d, 0xdf, 0x71, 0xed, 0x74, - 0x2d, 0x8b, 0x85, 0xf4, 0xcf, 0xc2, 0xe9, 0xe4, 0xf8, 0x74, 0x3e, 0x8b, 0x4d, 0x27, 0x31, 0x0a, - 0xf4, 0x1c, 0x0a, 0x13, 0x29, 0xfb, 0x7e, 0xfe, 0xb1, 0x52, 0xd5, 0xf6, 0xee, 0x48, 0x6c, 0xd8, - 0x8d, 0x19, 0x01, 0xf0, 0x71, 0x6c, 0x6e, 0x67, 0xc6, 0xb1, 0xd1, 0x3d, 0x37, 0xc4, 0x11, 0xbe, - 0xed, 0x76, 0x3a, 0xdd, 0x73, 0xdd, 0x14, 0x47, 0xd8, 0x6c, 0x18, 0xad, 0x76, 0xab, 0x71, 0xca, - 0x46, 0x07, 0xb0, 0xd6, 0xd1, 0x1b, 0x2d, 0xdd, 0x2c, 0xe7, 0x18, 0xb0, 0x77, 0x74, 0x76, 0xda, - 0x62, 0x65, 0x79, 0xfc, 0xbb, 0x02, 0xeb, 0xcd, 0xf1, 0x2c, 0xa0, 0x64, 0x8a, 0xea, 0xa0, 0xba, - 0x9e, 0x4d, 0xd8, 0xa4, 0x72, 0x55, 0x6d, 0xef, 0x81, 0x94, 0x20, 0xd3, 0x5c, 0x76, 0xa0, 0xbb, - 0x74, 0x3a, 0x37, 0x05, 0xae, 0xa2, 0x03, 0xdc, 0x04, 0x51, 0x19, 0x72, 0x57, 0x64, 0x2e, 0x27, - 0xc4, 0x7e, 0x45, 0x4f, 0x40, 0xbd, 0xb6, 0xc6, 0x33, 0xc2, 0x67, 0xa3, 0xed, 0x69, 0xb1, 0xfe, - 0x4d, 0x91, 0x79, 0x93, 0xfd, 0x4e, 0xc1, 0x2f, 0xa1, 0xcc, 0x42, 0x6d, 0xf7, 0xd2, 0x8b, 0x2e, - 0xd6, 0x17, 0x90, 0x67, 0x7b, 0x70, 0xb6, 0x54, 0x25, 0x4f, 0xe0, 0x7d, 0x40, 0x52, 0xd8, 0x3b, - 0xcf, 0x71, 0xc3, 0xd7, 0xf1, 0xc9, 0xb2, 0xaf, 0xe0, 0xae, 0x2c, 
0xeb, 0x10, 0xeb, 0x9a, 0x84, - 0x75, 0xa9, 0xc3, 0xc5, 0x3f, 0x45, 0xb0, 0x84, 0xaa, 0x2a, 0xac, 0x0f, 0x44, 0x58, 0xee, 0x50, - 0x4a, 0xce, 0xc8, 0x0c, 0xd3, 0xf8, 0x1f, 0x05, 0xb6, 0x64, 0xf0, 0xdc, 0xa2, 0x83, 0x51, 0x44, - 0xf1, 0x0a, 0x54, 0x72, 0x4d, 0x5c, 0x2a, 0x5f, 0xcc, 0x93, 0x24, 0x41, 0x02, 0x5b, 0xd3, 0x19, - 0xd0, 0x14, 0xf8, 0xa8, 0xb5, 0xec, 0x92, 0xd6, 0xe2, 0xe2, 0x72, 0xab, 0xc5, 0xed, 0x83, 0xca, - 0xa9, 0x93, 0x37, 0xa8, 0x00, 0xf9, 0x77, 0xdd, 0xb6, 0x21, 0x1e, 0x5d, 0x47, 0x6f, 0x7c, 0x90, - 0x37, 0xe7, 0xec, 0x84, 0xdf, 0xa2, 0x1c, 0x3e, 0x82, 0x42, 0xcb, 0x1b, 0xcc, 0x26, 0xac, 0x32, - 0xfd, 0x1a, 0x5e, 0xc0, 0xda, 0xa5, 0x43, 0xc6, 0x76, 0x20, 0xf5, 0x6d, 0xd5, 0x84, 0xbf, 0xd5, - 0x42, 0x7f, 0xab, 0x35, 0xdc, 0xb9, 0x29, 0x31, 0xf8, 0x29, 0xa0, 0x43, 0x42, 0x43, 0xb2, 0x65, - 0x87, 0x70, 0x00, 0x77, 0x13, 0x28, 0x39, 0xc1, 0xe7, 0x50, 0xb0, 0x65, 0x4c, 0x9e, 0x42, 0xf8, - 0x58, 0x22, 0x68, 0x04, 0xc0, 0x4d, 0xd8, 0x6a, 0xb3, 0x5c, 0x7a, 0xaf, 0xff, 0x45, 0xf2, 0x0d, - 0x6c, 0xa7, 0x48, 0xa4, 0x94, 0x2d, 0x50, 0x07, 0xde, 0x4c, 0x52, 0xa8, 0xa6, 0x58, 0xe0, 0x67, - 0xb0, 0xdd, 0x22, 0x63, 0x42, 0xc9, 0xa7, 0x1a, 0xac, 0xc1, 0x4e, 0x1a, 0xb8, 0x92, 0xb8, 0x03, - 0xc5, 0x1e, 0xb1, 0xa6, 0xec, 0x86, 0x08, 0xc2, 0xef, 0xa1, 0x14, 0xf0, 0xc0, 0xc5, 0x54, 0x44, - 0x64, 0x2f, 0x8b, 0xa7, 0x5f, 0x0c, 0xe2, 0xc5, 0xf8, 0x18, 0x4a, 0x21, 0x9b, 0xdc, 0xf5, 0x35, - 0x14, 0x23, 0xba, 0x60, 0x36, 0x5e, 0xcd, 0xb6, 0x19, 0xb2, 0x31, 0x24, 0xfe, 0x53, 0x01, 0x8d, - 0xcf, 0xa8, 0xe9, 0xb9, 0x97, 0xce, 0x90, 0x51, 0xf1, 0x71, 0x5e, 0x4c, 0x2c, 0xdf, 0x77, 0xdc, - 0xe1, 0x6a, 0x2a, 0x0e, 0x7d, 0x2f, 0x90, 0x68, 0x17, 0x40, 0x94, 0xd2, 0xb9, 0x4f, 0xa4, 0xad, - 0x6e, 0xf0, 0xc8, 0xe9, 0xdc, 0x27, 0xe8, 0x05, 0x20, 0x91, 0x0e, 0xa8, 0x37, 0xb5, 0x86, 0x44, - 0xc0, 0x72, 0x1c, 0x56, 0xe6, 0x99, 0x9e, 0x48, 0x30, 0x34, 0xee, 0xc2, 0xce, 0x21, 0xa1, 0x31, - 0x65, 0x51, 0xb3, 0xfb, 0x20, 0xb6, 0xbd, 0x18, 0xf0, 0xb8, 0x14, 0x88, 0xe4, 0x2d, 0x88, 0x57, - 0x68, 0xce, 0xcd, 0x02, 0x1b, 0xb0, 0x1d, 0x12, 0x32, 0x17, 0x0e, 0x62, 0x7c, 0x5a, 0xa8, 0xcb, - 0xa2, 0xc1, 0xca, 0x7e, 0xc1, 0x89, 0xca, 0xf7, 0xfe, 0x5a, 0x07, 0x95, 0xb3, 0x21, 0x13, 0xee, - 0xa4, 0xbe, 0x9b, 0x68, 0x77, 0xe5, 0xd7, 0xbd, 0xf2, 0xf9, 0xea, 0xcf, 0x2d, 0xce, 0xa0, 0x1f, - 0xa1, 0x10, 0x5a, 0x2b, 0xda, 0xb9, 0xa5, 0x45, 0x67, 0x7f, 0x72, 0x54, 0xee, 0xc5, 0x58, 0xe2, - 0x6e, 0x87, 0x33, 0xe8, 0x00, 0xb4, 0x98, 0xc9, 0xa2, 0xd4, 0x17, 0x21, 0x66, 0xbc, 0x95, 0x25, - 0xe4, 0x38, 0x83, 0x5a, 0xb0, 0x19, 0x77, 0x5c, 0x54, 0x49, 0x92, 0xc4, 0x6d, 0x78, 0x05, 0x4b, - 0x33, 0x52, 0xb2, 0xb2, 0x97, 0x14, 0x79, 0xaa, 0x9d, 0xc3, 0x48, 0x0a, 0xf7, 0xd9, 0xa5, 0x2c, - 0x0f, 0x57, 0x98, 0x32, 0xce, 0x7c, 0xab, 0xa0, 0xb7, 0xa0, 0xc5, 0x9c, 0x29, 0x9a, 0xcb, 0x6d, - 0x4f, 0x8b, 0x04, 0x2d, 0x30, 0x32, 0x9c, 0x41, 0x06, 0x14, 0x13, 0xc6, 0x82, 0x1e, 0xc6, 0xaf, - 0x5f, 0x9a, 0xeb, 0xd1, 0xe2, 0x64, 0xc8, 0x56, 0x55, 0xd0, 0xcf, 0x50, 0x4a, 0x1a, 0x0a, 0x0a, - 0x6b, 0x16, 0x1a, 0x52, 0x65, 0x77, 0x49, 0x36, 0x46, 0xf9, 0x0a, 0xd6, 0x84, 0x4b, 0xa0, 0x2d, - 0x09, 0x4e, 0x58, 0x50, 0x65, 0x3b, 0x15, 0x8d, 0x7a, 0x6b, 0x43, 0x29, 0xf9, 0xf2, 0x96, 0x8e, - 0x7b, 0xf7, 0x66, 0x46, 0x0b, 0x1e, 0x2a, 0x3f, 0xb7, 0x62, 0xe2, 0xcd, 0x2d, 0x65, 0x7a, 0x94, - 0x62, 0x4a, 0xbc, 0x50, 0x9c, 0x41, 0x3f, 0x40, 0xa1, 0xe7, 0x5a, 0x7e, 0x30, 0xf2, 0xe8, 0x52, - 0x8e, 0xa5, 0x77, 0xf0, 0xa0, 0xfa, 0xcb, 0xd7, 0x43, 0x87, 0x8e, 0x66, 0xfd, 0xda, 0xc0, 0x9b, - 0xd4, 0x27, 0x5e, 0x30, 0xbb, 0xb2, 0xea, 0xfd, 0xb1, 0x15, 0xd0, 0x7a, 0xf2, 0x5f, 0x81, 
0xfe, - 0x1a, 0x5f, 0xbf, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x14, 0x4f, 0xc0, 0x27, 0x23, 0x0c, 0x00, - 0x00, + // 1454 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xdf, 0x72, 0xda, 0xc6, + 0x17, 0xb6, 0x00, 0x61, 0x7c, 0x04, 0x58, 0xd9, 0x60, 0x3b, 0x51, 0xec, 0x5f, 0xe2, 0xfd, 0x35, + 0xad, 0x4b, 0x5a, 0x48, 0x9d, 0x66, 0xda, 0x38, 0xed, 0x74, 0xb0, 0x51, 0x6d, 0x62, 0x02, 0x19, + 0x81, 0x93, 0x26, 0x33, 0x1d, 0x46, 0xc0, 0x1a, 0x54, 0x63, 0x89, 0x22, 0xe1, 0x96, 0xe9, 0xf4, + 0xa2, 0x79, 0x85, 0x4e, 0xa7, 0x6f, 0xd2, 0x17, 0xe8, 0x13, 0x74, 0x7a, 0x9b, 0xcb, 0x3e, 0x48, + 0x67, 0xff, 0x48, 0x48, 0xd8, 0x90, 0x76, 0x7a, 0xe3, 0x61, 0xcf, 0xf9, 0xce, 0xb7, 0xdf, 0x39, + 0x3a, 0xbb, 0x67, 0x0d, 0xda, 0x70, 0xe4, 0x78, 0x4e, 0x7b, 0x7c, 0x5a, 0xb4, 0xec, 0x2e, 0xf9, + 0x9e, 0xff, 0x2d, 0x30, 0x23, 0x92, 0xd9, 0x42, 0xbb, 0xd9, 0x73, 0x9c, 0xde, 0x80, 0x14, 0x03, + 0xa4, 0x69, 0x4f, 0x38, 0x42, 0xbb, 0x35, 0xeb, 0x22, 0xe7, 0x43, 0xcf, 0x77, 0x6e, 0x0a, 0xa7, + 0x39, 0xb4, 0x8a, 0xa6, 0x6d, 0x3b, 0x9e, 0xe9, 0x59, 0x8e, 0xed, 0x72, 0x2f, 0xfe, 0x55, 0x82, + 0xf5, 0x9a, 0xd3, 0x25, 0x47, 0xc4, 0x1c, 0x78, 0xfd, 0x83, 0x3e, 0xe9, 0x9c, 0x19, 0xe4, 0xdb, + 0x31, 0x71, 0x3d, 0xf4, 0x08, 0xe4, 0xe1, 0xc8, 0x69, 0x93, 0x1b, 0xd2, 0x1d, 0x69, 0x27, 0xbb, + 0xfb, 0xff, 0x02, 0x17, 0x75, 0x35, 0xba, 0xf0, 0x8c, 0x42, 0x0d, 0x1e, 0x81, 0xf7, 0x41, 0x66, + 0x6b, 0xa4, 0xc0, 0xf2, 0x49, 0xed, 0xb8, 0x56, 0x7f, 0x51, 0x53, 0x97, 0xd0, 0x2a, 0x28, 0x47, + 0x7a, 0xa9, 0xda, 0x3c, 0xaa, 0xd4, 0xf4, 0x46, 0x43, 0x95, 0x50, 0x1a, 0x52, 0xd5, 0xca, 0x73, + 0x9d, 0xad, 0x62, 0x28, 0x03, 0x2b, 0x86, 0x5e, 0x2a, 0x73, 0x67, 0x1c, 0xff, 0x26, 0xc1, 0xc6, + 0xa5, 0xbd, 0xdc, 0xa1, 0x63, 0xbb, 0x04, 0xed, 0x81, 0xec, 0x7a, 0xa6, 0xe7, 0x4b, 0x7b, 0x67, + 0x9e, 0x34, 0x0e, 0x2f, 0x34, 0x28, 0xd6, 0xe0, 0x21, 0xb8, 0x05, 0x32, 0x5b, 0x47, 0xb5, 0x29, + 0xb0, 0xcc, 0xb5, 0xbd, 0x54, 0x25, 0xaa, 0xe4, 0xa4, 0xe6, 0x2f, 0x63, 0x68, 0x05, 0xe4, 0x12, + 0xd5, 0xa9, 0xc6, 0x51, 0x0a, 0x12, 0x65, 0xbd, 0x54, 0x56, 0x13, 0xd4, 0x48, 0xd5, 0xbe, 0x54, + 0x65, 0x0a, 0xaf, 0xd5, 0x9b, 0x2d, 0xbe, 0x4c, 0xe2, 0xd7, 0x12, 0xa4, 0x9e, 0x12, 0xcf, 0xec, + 0x9a, 0x9e, 0x89, 0xb6, 0x21, 0xdd, 0x1b, 0x0d, 0x3b, 0x2d, 0xb3, 0xdb, 0x1d, 0x11, 0xd7, 0x65, + 0x82, 0x57, 0x0c, 0x85, 0xda, 0x4a, 0xdc, 0x84, 0xee, 0x43, 0x8e, 0x41, 0x7a, 0xa6, 0x47, 0xbe, + 0x33, 0x27, 0x01, 0x34, 0xc6, 0xa0, 0x88, 0xfa, 0x0e, 0xb9, 0xcb, 0x8f, 0xd8, 0x86, 0x74, 0xdf, + 0xf3, 0x86, 0x01, 0x32, 0xce, 0x49, 0xa9, 0x4d, 0x40, 0xf0, 0x1b, 0x09, 0x12, 0xb4, 0x1c, 0x28, + 0x0b, 0x31, 0xab, 0x2b, 0xb6, 0x8d, 0x59, 0x5d, 0x1a, 0xdb, 0xb6, 0xec, 0xee, 0xcc, 0x2e, 0x0a, + 0xb5, 0xf9, 0xf4, 0xef, 0xf9, 0xd5, 0x8d, 0xb3, 0xea, 0x5e, 0x0b, 0x55, 0x37, 0x52, 0x4a, 0x74, + 0x0f, 0x52, 0xe7, 0x22, 0xd1, 0x1b, 0x89, 0x3b, 0xd2, 0x8e, 0xb2, 0xbb, 0x2a, 0xb0, 0x7e, 0xfe, + 0x46, 0x00, 0xc0, 0xc7, 0x57, 0xd6, 0x3d, 0x0d, 0xa9, 0x2f, 0xeb, 0xd5, 0x6a, 0xfd, 0x85, 0x6e, + 0xf0, 0xc2, 0x1f, 0x94, 0x6a, 0xe5, 0x4a, 0xb9, 0xd4, 0xd4, 0xd5, 0x18, 0x02, 0x48, 0x56, 0xf5, + 0x52, 0x59, 0x37, 0xd4, 0x38, 0x05, 0x36, 0x8e, 0x4e, 0x9a, 0x65, 0x1a, 0x96, 0xc0, 0x3f, 0x49, + 0xb0, 0x7c, 0x30, 0x18, 0xbb, 0x1e, 0x19, 0xa1, 0x22, 0xc8, 0xb6, 0xd3, 0x25, 0xb4, 0xb6, 0xf1, + 0x1d, 0x65, 0xf7, 0xa6, 0x90, 0x20, 0xdc, 0x4c, 0xb6, 0xab, 0xdb, 0xde, 0x68, 0x62, 0x70, 0x9c, + 0xa6, 0x03, 0x4c, 0x8d, 0x48, 0x85, 0xf8, 0x19, 0x99, 0x88, 0x0a, 0xd1, 0x9f, 0x68, 0x1b, 0xe4, + 0x0b, 0x73, 0x30, 0x26, 0xac, 0x36, 0xca, 0xae, 0x12, 0xca, 
0xdf, 0xe0, 0x9e, 0xbd, 0xd8, 0xa7, + 0x12, 0x7e, 0x00, 0x2a, 0x35, 0x55, 0xec, 0x53, 0x27, 0x68, 0xcc, 0xdb, 0x90, 0xa0, 0x7b, 0x30, + 0xb6, 0x99, 0x48, 0xe6, 0xc0, 0x0f, 0x01, 0x09, 0x61, 0x4f, 0x1c, 0xcb, 0xf6, 0x8f, 0xda, 0x5b, + 0xc3, 0xee, 0xc2, 0x75, 0x11, 0x56, 0x25, 0xe6, 0x05, 0xf1, 0xe3, 0x66, 0x3e, 0x2e, 0xfe, 0x22, + 0x80, 0x45, 0x54, 0xed, 0xc0, 0x72, 0x87, 0x9b, 0xc5, 0x0e, 0xd9, 0x68, 0x8d, 0x0c, 0xdf, 0x8d, + 0xff, 0x90, 0x20, 0x27, 0x8c, 0x2f, 0x4c, 0xaf, 0xd3, 0x0f, 0x28, 0x3e, 0x01, 0x99, 0x5c, 0x10, + 0xdb, 0x13, 0x27, 0x6e, 0x3b, 0x4a, 0x10, 0xc1, 0x16, 0x74, 0x0a, 0x34, 0x38, 0x3e, 0x48, 0x2d, + 0x36, 0x27, 0xb5, 0xb0, 0xb8, 0xf8, 0x62, 0x71, 0x0f, 0x41, 0x66, 0xd4, 0xd1, 0x0e, 0x4a, 0x41, + 0xe2, 0x49, 0xbd, 0x52, 0x53, 0x25, 0x7a, 0x24, 0xab, 0x7a, 0xe9, 0xb9, 0xe8, 0x9c, 0x93, 0x67, + 0xac, 0x8b, 0xe2, 0x78, 0x13, 0xe0, 0x90, 0x78, 0xf3, 0x4a, 0xf6, 0x18, 0x14, 0xe6, 0x15, 0x79, + 0x7e, 0x00, 0xc9, 0x53, 0x8b, 0x0c, 0xba, 0xae, 0xa8, 0x54, 0xae, 0xc0, 0xaf, 0xcf, 0x82, 0x7f, + 0xb7, 0x16, 0x4a, 0xf6, 0xc4, 0x10, 0x18, 0x5c, 0x85, 0x74, 0x85, 0x6a, 0x9d, 0x43, 0x1e, 0x62, + 0x8b, 0xfd, 0x03, 0xb6, 0xdb, 0x90, 0x29, 0x93, 0x01, 0xf1, 0xe6, 0x7e, 0xde, 0x23, 0x48, 0x95, + 0x9d, 0xce, 0xf8, 0x9c, 0xd6, 0xe0, 0xbf, 0x6d, 0x55, 0x02, 0x75, 0x7f, 0x3c, 0x38, 0x8b, 0x88, + 0xff, 0x10, 0x56, 0xba, 0x82, 0xdd, 0x3f, 0x4b, 0xfe, 0x71, 0xf6, 0x77, 0x35, 0xa6, 0x08, 0xfc, + 0x3e, 0x5c, 0x0b, 0x51, 0x88, 0xf2, 0xe5, 0x40, 0xee, 0x38, 0x63, 0xd1, 0x26, 0xb2, 0xc1, 0x17, + 0xf8, 0x2e, 0x87, 0x46, 0x93, 0x53, 0x21, 0x6e, 0x75, 0xf9, 0x46, 0x2b, 0x06, 0xfd, 0x89, 0xf3, + 0x80, 0xc2, 0xb0, 0x85, 0x94, 0x55, 0xc8, 0x34, 0x88, 0x39, 0xa2, 0x5d, 0xc7, 0xe9, 0x1e, 0x43, + 0xd6, 0x65, 0x86, 0xd6, 0x88, 0x5b, 0x16, 0x7e, 0xc0, 0x8c, 0x1b, 0x0e, 0xc6, 0xc7, 0x90, 0xf5, + 0xd9, 0xc4, 0xae, 0x8f, 0x20, 0x13, 0xd0, 0xb9, 0xe3, 0xc1, 0x62, 0xb6, 0xb4, 0xcf, 0x46, 0x91, + 0xf8, 0x17, 0x09, 0x14, 0x56, 0x95, 0x03, 0xc7, 0x3e, 0xb5, 0x7a, 0x94, 0x8a, 0x55, 0xb1, 0x75, + 0x6e, 0x0e, 0x87, 0x96, 0xdd, 0x5b, 0x4c, 0xc5, 0xa0, 0x4f, 0x39, 0x12, 0x6d, 0x01, 0xf0, 0x50, + 0x6f, 0x32, 0x24, 0xe2, 0xaa, 0x5e, 0x61, 0x96, 0xe6, 0x64, 0x48, 0x9b, 0x15, 0x71, 0xb7, 0xeb, + 0x39, 0x23, 0xb3, 0x47, 0x38, 0x8c, 0x4f, 0x03, 0x95, 0x79, 0x1a, 0xdc, 0x41, 0xd1, 0xb8, 0x0e, + 0xeb, 0x87, 0xc4, 0x0b, 0x29, 0x0b, 0x92, 0x7d, 0x08, 0x7c, 0xdb, 0x56, 0x87, 0xd9, 0x85, 0x40, + 0x24, 0x3e, 0x7e, 0x38, 0x42, 0xb1, 0xa6, 0x0b, 0x5c, 0x83, 0x35, 0x9f, 0x90, 0xde, 0xec, 0x6e, + 0x88, 0x4f, 0xf1, 0x75, 0x99, 0xde, 0xe2, 0x93, 0x04, 0x56, 0x10, 0x8e, 0x7f, 0x8f, 0x41, 0xea, + 0xd9, 0xc8, 0x19, 0x3a, 0xae, 0x39, 0x40, 0xf7, 0xa2, 0x17, 0xce, 0x9a, 0x10, 0xe3, 0xfb, 0xff, + 0xe5, 0x25, 0x73, 0x0f, 0x52, 0x7e, 0xe7, 0x8a, 0x5b, 0xe6, 0x52, 0x6b, 0x07, 0x00, 0x71, 0xb4, + 0x12, 0xc1, 0xd1, 0x8a, 0x1c, 0x0c, 0xf9, 0x6d, 0x07, 0xc3, 0x6f, 0xec, 0xe4, 0xb4, 0xb1, 0xcf, + 0xae, 0xbc, 0xb8, 0xe8, 0x44, 0xd3, 0x9b, 0xad, 0x5a, 0xbd, 0xac, 0xab, 0x12, 0x7d, 0x1c, 0x95, + 0xf5, 0xaa, 0xde, 0xd4, 0xb9, 0x81, 0xbd, 0x3a, 0x2a, 0xb5, 0xb2, 0xfe, 0x95, 0x1a, 0xa7, 0xb7, + 0x19, 0xf7, 0xa9, 0x09, 0x94, 0x05, 0xd8, 0x3f, 0xa9, 0x1e, 0xb7, 0xb8, 0x4f, 0xa6, 0x71, 0x6c, + 0x2d, 0x00, 0xc9, 0xdd, 0x37, 0x34, 0x90, 0x8a, 0x43, 0x36, 0xac, 0xce, 0xbc, 0x88, 0xd0, 0xd6, + 0xc2, 0x47, 0x9c, 0xf6, 0xbf, 0xc5, 0x0f, 0x29, 0xbc, 0xf9, 0xfa, 0xcf, 0xbf, 0x7e, 0x8e, 0xad, + 0xa3, 0x5c, 0xf1, 0xe2, 0xa3, 0x22, 0x2d, 0x6c, 0xb1, 0xcf, 0x50, 0x1d, 0x46, 0xde, 0x84, 0x94, + 0x3f, 0x10, 0xd1, 0xfa, 0xa5, 0xaf, 0xad, 0xd3, 0x37, 0xa9, 0xb6, 0x11, 0xda, 0x21, 
0x3c, 0xa3, + 0xf0, 0x06, 0xa3, 0xbe, 0x86, 0x56, 0x03, 0x6a, 0xda, 0x3c, 0x63, 0x17, 0xed, 0x83, 0x12, 0x9a, + 0x98, 0x68, 0x66, 0xbc, 0x87, 0xa6, 0xa8, 0x36, 0x67, 0x4f, 0xbc, 0x84, 0xca, 0x90, 0x0e, 0x8f, + 0x4f, 0xa4, 0x45, 0x49, 0xc2, 0x33, 0x75, 0x01, 0xcb, 0xd7, 0x81, 0x92, 0x85, 0x29, 0xce, 0x90, + 0x47, 0xb2, 0xd4, 0x58, 0x96, 0x39, 0x84, 0x68, 0x96, 0x62, 0xae, 0xf9, 0x89, 0x1e, 0x06, 0x22, + 0xd9, 0x38, 0x9d, 0xcb, 0x7f, 0x6b, 0xc1, 0xec, 0xc5, 0x4b, 0xf7, 0x25, 0x74, 0x0c, 0xf1, 0x43, + 0xe2, 0x21, 0xff, 0xdd, 0x36, 0x1d, 0x7e, 0x1a, 0x0a, 0x9b, 0x44, 0xc4, 0x16, 0x93, 0xb4, 0x81, + 0xd6, 0xa8, 0xa4, 0xa0, 0x8b, 0x8b, 0x3f, 0x58, 0xdd, 0xcf, 0xf3, 0xf9, 0x1f, 0xd1, 0x37, 0x7e, + 0x37, 0x5d, 0x0f, 0x5f, 0x07, 0x6f, 0x2b, 0xd6, 0xc7, 0x8c, 0xb4, 0xa0, 0x65, 0x22, 0xa4, 0x7b, + 0x52, 0xfe, 0x95, 0xa6, 0x5d, 0xbd, 0xd1, 0x9e, 0x94, 0x47, 0x27, 0x90, 0xe4, 0x97, 0x3f, 0xca, + 0xf9, 0xe7, 0x2b, 0x3c, 0x32, 0xe6, 0xee, 0x26, 0x52, 0xc8, 0xcf, 0x49, 0xa1, 0x01, 0x2b, 0xc1, + 0xa4, 0x42, 0x7e, 0x03, 0xce, 0x8e, 0x3f, 0xed, 0xc6, 0x65, 0x87, 0xa8, 0xd0, 0x75, 0x46, 0x9f, + 0xd1, 0x52, 0x94, 0xbe, 0x3d, 0x1e, 0x9c, 0x51, 0xad, 0xcf, 0x01, 0xa6, 0xc3, 0x0a, 0x85, 0x83, + 0xa3, 0x9a, 0x6f, 0x5e, 0xe1, 0x89, 0xf2, 0xe6, 0x23, 0xbc, 0x55, 0x48, 0xf2, 0x51, 0x14, 0xd4, + 0x20, 0x32, 0xe7, 0xb4, 0xb5, 0x19, 0xab, 0xe0, 0x5a, 0x63, 0x5c, 0xab, 0x18, 0x28, 0x17, 0x1f, + 0x47, 0x94, 0xad, 0x02, 0xd9, 0xe8, 0x9d, 0x3f, 0xb7, 0xab, 0xb6, 0xa6, 0xad, 0x71, 0xc5, 0x88, + 0xc0, 0x4b, 0xe8, 0x10, 0x32, 0x91, 0xdb, 0x7e, 0x2e, 0xd3, 0xe6, 0x0c, 0x53, 0x64, 0x36, 0xe0, + 0x25, 0xf4, 0x19, 0xa4, 0x1a, 0xb6, 0x39, 0x74, 0xfb, 0x8e, 0x37, 0x97, 0x63, 0xee, 0x21, 0xdc, + 0xdf, 0x79, 0xf5, 0x6e, 0xcf, 0xf2, 0xfa, 0xe3, 0x76, 0xa1, 0xe3, 0x9c, 0x17, 0xcf, 0x1d, 0x77, + 0x7c, 0x66, 0x16, 0xdb, 0x03, 0xd3, 0xf5, 0x8a, 0xd1, 0xff, 0xa1, 0xdb, 0x49, 0xb6, 0x7e, 0xf0, + 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x30, 0x9b, 0x20, 0x5c, 0x0f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -1210,9 +1456,11 @@ type IndexClient interface { ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) - GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) - IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) - DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) @@ -1304,81 +1552,49 @@ func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { return m, nil } -func (c *indexClient) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) { - out := new(GetDocumentResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetDocument", in, out, opts...) +func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[1], "/index.Index/IndexDocument", opts...) +func (c *indexClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Index", in, out, opts...) if err != nil { return nil, err } - x := &indexIndexDocumentClient{stream} - return x, nil -} - -type Index_IndexDocumentClient interface { - Send(*IndexDocumentRequest) error - CloseAndRecv() (*IndexDocumentResponse, error) - grpc.ClientStream -} - -type indexIndexDocumentClient struct { - grpc.ClientStream -} - -func (x *indexIndexDocumentClient) Send(m *IndexDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *indexIndexDocumentClient) CloseAndRecv() (*IndexDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(IndexDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) 
+ if err != nil { return nil, err } - return m, nil + return out, nil } -func (c *indexClient) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[2], "/index.Index/DeleteDocument", opts...) +func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) if err != nil { return nil, err } - x := &indexDeleteDocumentClient{stream} - return x, nil -} - -type Index_DeleteDocumentClient interface { - Send(*DeleteDocumentRequest) error - CloseAndRecv() (*DeleteDocumentResponse, error) - grpc.ClientStream -} - -type indexDeleteDocumentClient struct { - grpc.ClientStream -} - -func (x *indexDeleteDocumentClient) Send(m *DeleteDocumentRequest) error { - return x.ClientStream.SendMsg(m) + return out, nil } -func (x *indexDeleteDocumentClient) CloseAndRecv() (*DeleteDocumentResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(DeleteDocumentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) + if err != nil { return nil, err } - return m, nil + return out, nil } func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { @@ -1425,15 +1641,67 @@ type IndexServer interface { ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error - GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) - IndexDocument(Index_IndexDocumentServer) error - DeleteDocument(Index_DeleteDocumentServer) error + Get(context.Context, *GetRequest) (*GetResponse, error) + Index(context.Context, *IndexRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) Search(context.Context, *SearchRequest) (*SearchResponse, error) GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) GetIndexStats(context.Context, *empty.Empty) (*GetIndexStatsResponse, error) Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } +// UnimplementedIndexServer can be embedded to have forward compatible implementations. 
+type UnimplementedIndexServer struct { +} + +func (*UnimplementedIndexServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedIndexServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") +} +func (*UnimplementedIndexServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") +} +func (*UnimplementedIndexServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") +} +func (*UnimplementedIndexServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (*UnimplementedIndexServer) ClusterWatch(req *empty.Empty, srv Index_ClusterWatchServer) error { + return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") +} +func (*UnimplementedIndexServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedIndexServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") +} +func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedIndexServer) GetIndexConfig(ctx context.Context, req *empty.Empty) (*GetIndexConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIndexConfig not implemented") +} +func (*UnimplementedIndexServer) GetIndexStats(ctx context.Context, req *empty.Empty) (*GetIndexStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIndexStats not implemented") +} +func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} + func RegisterIndexServer(s *grpc.Server, srv IndexServer) { s.RegisterService(&_Index_serviceDesc, srv) } @@ -1549,74 +1817,94 @@ func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { return x.ServerStream.SendMsg(m) } -func _Index_GetDocument_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocumentRequest) +func _Index_Get_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetDocument(ctx, in) + return srv.(IndexServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetDocument", + FullMethod: "/index.Index/Get", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetDocument(ctx, req.(*GetDocumentRequest)) + return srv.(IndexServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_IndexDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IndexServer).IndexDocument(&indexIndexDocumentServer{stream}) -} - -type Index_IndexDocumentServer interface { - SendAndClose(*IndexDocumentResponse) error - Recv() (*IndexDocumentRequest, error) - grpc.ServerStream -} - -type indexIndexDocumentServer struct { - grpc.ServerStream -} - -func (x *indexIndexDocumentServer) SendAndClose(m *IndexDocumentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *indexIndexDocumentServer) Recv() (*IndexDocumentRequest, error) { - m := new(IndexDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Index_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IndexRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil -} - -func _Index_DeleteDocument_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IndexServer).DeleteDocument(&indexDeleteDocumentServer{stream}) -} - -type Index_DeleteDocumentServer interface { - SendAndClose(*DeleteDocumentResponse) error - Recv() (*DeleteDocumentRequest, error) - grpc.ServerStream + if interceptor == nil { + return srv.(IndexServer).Index(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Index", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Index(ctx, req.(*IndexRequest)) + } + return interceptor(ctx, in, info, handler) } -type indexDeleteDocumentServer struct { - grpc.ServerStream +func _Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *indexDeleteDocumentServer) SendAndClose(m *DeleteDocumentResponse) error { - return x.ServerStream.SendMsg(m) +func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *indexDeleteDocumentServer) Recv() (*DeleteDocumentRequest, error) { - m := new(DeleteDocumentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { +func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { return nil, err } - return m, nil + if interceptor == nil { + return srv.(IndexServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) } func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -1716,8 +2004,24 @@ var _Index_serviceDesc = grpc.ServiceDesc{ Handler: _Index_ClusterInfo_Handler, }, { - MethodName: "GetDocument", - Handler: _Index_GetDocument_Handler, + MethodName: "Get", + Handler: _Index_Get_Handler, + }, + { + MethodName: "Index", + Handler: _Index_Index_Handler, + }, + { + MethodName: "Delete", + Handler: _Index_Delete_Handler, + }, + { + MethodName: "BulkIndex", + Handler: _Index_BulkIndex_Handler, + }, + { + MethodName: "BulkDelete", + Handler: _Index_BulkDelete_Handler, }, { MethodName: "Search", @@ -1742,16 +2046,6 @@ var _Index_serviceDesc = grpc.ServiceDesc{ Handler: _Index_ClusterWatch_Handler, ServerStreams: true, }, - { - StreamName: "IndexDocument", - Handler: _Index_IndexDocument_Handler, - ClientStreams: true, - }, - { - StreamName: "DeleteDocument", - Handler: _Index_DeleteDocument_Handler, - ClientStreams: true, - }, }, Metadata: "protobuf/index/index.proto", } diff --git a/protobuf/index/index.pb.gw.go b/protobuf/index/index.pb.gw.go new file mode 100644 index 0000000..a54291a --- /dev/null +++ b/protobuf/index/index.pb.gw.go @@ -0,0 +1,510 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/index/index.proto + +/* +Package index is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package index + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Index_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Index_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Index_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Index_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Index_1(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq IndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), 
grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterIndexHandler(ctx, mux, conn) +} + +// RegisterIndexHandler registers the http handlers for service Index to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) +} + +// RegisterIndexHandlerClient registers the http handlers for service Index +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "IndexClient" to call the correct interceptors. +func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { + + mux.Handle("GET", pattern_Index_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Index_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Index_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Index_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Index_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Index_NodeInfo_0 = runtime.ForwardResponseMessage + + forward_Index_ClusterInfo_0 = runtime.ForwardResponseMessage + + forward_Index_Get_0 = runtime.ForwardResponseMessage + + forward_Index_Index_0 = runtime.ForwardResponseMessage + + forward_Index_Index_1 = runtime.ForwardResponseMessage + + forward_Index_Delete_0 = runtime.ForwardResponseMessage + + forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Index_Search_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index 0943a3b..5dee6a8 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -16,24 +16,71 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; package index; option go_package = "github.com/mosuka/blast/protobuf/index"; service Index { - rpc 
NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { + option (google.api.http) = { + get: "/v1/node/status" + }; + } rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { + option (google.api.http) = { + get: "/v1/cluster/status" + }; + } rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} - rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} - rpc DeleteDocument (stream DeleteDocumentRequest) returns (DeleteDocumentResponse) {} - rpc Search (SearchRequest) returns (SearchResponse) {} + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Index (IndexRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + additional_bindings { + put: "/v1/documents/{id=**}" + body: "*" + } + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/bulk" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/bulk" + body: "*" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} @@ -41,28 +88,31 @@ service Index { message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } message Metadata { string grpc_address = 1; - string http_address = 2; + string grpc_gateway_address = 2; + string http_address = 3; } message Node { @@ -111,32 +161,42 @@ message ClusterWatchResponse { Cluster cluster = 3; } -message Document { +message GetRequest { + string id = 1; +} + +message GetResponse { +// Document document = 1; + google.protobuf.Any fields = 1; +} + +message IndexRequest { string id = 1; google.protobuf.Any fields = 2; } -message GetDocumentRequest { +message DeleteRequest { string id = 1; } -message GetDocumentResponse { - Document document = 1; +message Document { + string id = 1; + google.protobuf.Any fields = 2; } -message IndexDocumentRequest { - Document document = 1; +message BulkIndexRequest { + repeated Document documents = 1; } -message 
IndexDocumentResponse { +message BulkIndexResponse { int32 count = 1; } -message DeleteDocumentRequest { - string id = 1; +message BulkDeleteRequest { + repeated string ids = 1; } -message DeleteDocumentResponse { +message BulkDeleteResponse { int32 count = 1; } @@ -162,3 +222,20 @@ message GetIndexStatsResponse { google.protobuf.Any index_stats = 1; } +message Proposal { + enum Event { + UNKNOWN = 0; + SET_NODE = 1; + DELETE_NODE = 2; + INDEX = 3; + DELETE = 4; + BULK_INDEX = 5; + BULK_DELETE = 6; + } + Event event = 1; + Node node = 2; + Document document = 3; + string id = 4; + repeated Document documents = 5; + repeated string ids = 6; +} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go index a2554fb..430c2e5 100644 --- a/protobuf/management/management.pb.go +++ b/protobuf/management/management.pb.go @@ -9,7 +9,10 @@ import ( proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -22,26 +25,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type NodeHealthCheckRequest_Probe int32 const ( - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 ) var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "HEALTHINESS", - 1: "LIVENESS", - 2: "READINESS", + 0: "UNKNOWN", + 1: "HEALTHINESS", + 2: "LIVENESS", + 3: "READINESS", } var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "HEALTHINESS": 0, - "LIVENESS": 1, - "READINESS": 2, + "UNKNOWN": 0, + "HEALTHINESS": 1, + "LIVENESS": 2, + "READINESS": 3, } func (x NodeHealthCheckRequest_Probe) String() string { @@ -55,30 +61,33 @@ func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { type NodeHealthCheckResponse_State int32 const ( - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 ) var 
NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "HEALTHY", - 1: "UNHEALTHY", - 2: "ALIVE", - 3: "DEAD", - 4: "READY", - 5: "NOT_READY", + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "ALIVE", + 4: "DEAD", + 5: "READY", + 6: "NOT_READY", } var NodeHealthCheckResponse_State_value = map[string]int32{ - "HEALTHY": 0, - "UNHEALTHY": 1, - "ALIVE": 2, - "DEAD": 3, - "READY": 4, - "NOT_READY": 5, + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "ALIVE": 3, + "DEAD": 4, + "READY": 5, + "NOT_READY": 6, } func (x NodeHealthCheckResponse_State) String() string { @@ -179,7 +188,41 @@ func (x WatchResponse_Command) String() string { } func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15, 0} + return fileDescriptor_5e030ad796566078, []int{16, 0} +} + +type Proposal_Event int32 + +const ( + Proposal_UNKNOWN Proposal_Event = 0 + Proposal_SET_NODE Proposal_Event = 1 + Proposal_DELETE_NODE Proposal_Event = 2 + Proposal_SET_VALUE Proposal_Event = 3 + Proposal_DELETE_VALUE Proposal_Event = 4 +) + +var Proposal_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_NODE", + 2: "DELETE_NODE", + 3: "SET_VALUE", + 4: "DELETE_VALUE", +} + +var Proposal_Event_value = map[string]int32{ + "UNKNOWN": 0, + "SET_NODE": 1, + "DELETE_NODE": 2, + "SET_VALUE": 3, + "DELETE_VALUE": 4, +} + +func (x Proposal_Event) String() string { + return proto.EnumName(Proposal_Event_name, int32(x)) +} + +func (Proposal_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{17, 0} } type NodeHealthCheckRequest struct { @@ -218,7 +261,7 @@ func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { return m.Probe } - return NodeHealthCheckRequest_HEALTHINESS + return NodeHealthCheckRequest_UNKNOWN } type NodeHealthCheckResponse struct { @@ -257,12 +300,13 @@ func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return NodeHealthCheckResponse_HEALTHY + return NodeHealthCheckResponse_UNKNOWN } type Metadata struct { GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` + HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -300,6 +344,13 @@ func (m *Metadata) GetGrpcAddress() string { return "" } +func (m *Metadata) GetGrpcGatewayAddress() string { + if m != nil { + return m.GrpcGatewayAddress + } + return "" +} + func (m *Metadata) GetHttpAddress() string { if m != nil { return m.HttpAddress @@ -620,6 +671,53 @@ func (m *ClusterWatchResponse) GetCluster() *Cluster { return nil } +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) 
Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{10} +} + +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyValue.Unmarshal(m, b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return xxx_messageInfo_KeyValue.Size(m) +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() *any.Any { + if m != nil { + return m.Value + } + return nil +} + type GetRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -631,7 +729,7 @@ func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} + return fileDescriptor_5e030ad796566078, []int{11} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { @@ -670,7 +768,7 @@ func (m *GetResponse) Reset() { *m = GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} + return fileDescriptor_5e030ad796566078, []int{12} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { @@ -710,7 +808,7 @@ func (m *SetRequest) Reset() { *m = SetRequest{} } func (m *SetRequest) String() string { return proto.CompactTextString(m) } func (*SetRequest) ProtoMessage() {} func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} + return fileDescriptor_5e030ad796566078, []int{13} } func (m *SetRequest) XXX_Unmarshal(b []byte) error { @@ -756,7 +854,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} + return fileDescriptor_5e030ad796566078, []int{14} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -795,7 +893,7 @@ func (m *WatchRequest) Reset() { *m = WatchRequest{} } func (m *WatchRequest) String() string { return proto.CompactTextString(m) } func (*WatchRequest) ProtoMessage() {} func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} + return fileDescriptor_5e030ad796566078, []int{15} } func (m *WatchRequest) XXX_Unmarshal(b []byte) error { @@ -836,7 +934,7 @@ func (m *WatchResponse) Reset() { *m = WatchResponse{} } func (m *WatchResponse) String() string { return proto.CompactTextString(m) } func (*WatchResponse) ProtoMessage() {} func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15} + return fileDescriptor_5e030ad796566078, []int{16} } func (m *WatchResponse) XXX_Unmarshal(b []byte) error { @@ -878,12 +976,68 @@ func (m *WatchResponse) GetValue() *any.Any { return nil } +type Proposal struct { + Event Proposal_Event 
`protobuf:"varint,1,opt,name=event,proto3,enum=management.Proposal_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + KeyValue *KeyValue `protobuf:"bytes,3,opt,name=key_value,json=keyValue,proto3" json:"key_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_5e030ad796566078, []int{17} +} + +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Proposal.Unmarshal(m, b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return xxx_messageInfo_Proposal.Size(m) +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetEvent() Proposal_Event { + if m != nil { + return m.Event + } + return Proposal_UNKNOWN +} + +func (m *Proposal) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *Proposal) GetKeyValue() *KeyValue { + if m != nil { + return m.KeyValue + } + return nil +} + func init() { proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) proto.RegisterEnum("management.Node_State", Node_State_name, Node_State_value) proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, WatchResponse_Command_value) + proto.RegisterEnum("management.Proposal_Event", Proposal_Event_name, Proposal_Event_value) proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") proto.RegisterType((*Metadata)(nil), "management.Metadata") @@ -895,12 +1049,14 @@ func init() { proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") proto.RegisterType((*ClusterWatchResponse)(nil), "management.ClusterWatchResponse") + proto.RegisterType((*KeyValue)(nil), "management.KeyValue") proto.RegisterType((*GetRequest)(nil), "management.GetRequest") proto.RegisterType((*GetResponse)(nil), "management.GetResponse") proto.RegisterType((*SetRequest)(nil), "management.SetRequest") proto.RegisterType((*DeleteRequest)(nil), "management.DeleteRequest") proto.RegisterType((*WatchRequest)(nil), "management.WatchRequest") proto.RegisterType((*WatchResponse)(nil), "management.WatchResponse") + proto.RegisterType((*Proposal)(nil), "management.Proposal") } func init() { @@ -908,68 +1064,82 @@ func init() { } var fileDescriptor_5e030ad796566078 = []byte{ - // 963 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xed, 0x6e, 0xdb, 0x36, - 0x14, 
0xb5, 0x2c, 0x2b, 0x76, 0xae, 0x93, 0x56, 0x60, 0x8b, 0x34, 0xf1, 0x86, 0x2e, 0xe1, 0xba, - 0x22, 0x5b, 0x57, 0xa7, 0xf0, 0x56, 0x2c, 0xeb, 0xba, 0x0f, 0x35, 0xd2, 0x62, 0xa7, 0xaa, 0x1c, - 0xc8, 0x4e, 0x83, 0x0e, 0x03, 0x0a, 0xd9, 0x62, 0x6d, 0x23, 0xb6, 0xe4, 0x59, 0x74, 0x80, 0x3c, - 0xc3, 0x06, 0xec, 0x4d, 0xf6, 0x77, 0xaf, 0xb3, 0x5f, 0x7b, 0x8e, 0x82, 0x22, 0x25, 0x4b, 0x8a, - 0xec, 0xe4, 0x9f, 0x79, 0x79, 0xce, 0xe5, 0xb9, 0x87, 0xf7, 0x52, 0x86, 0x47, 0xd3, 0x99, 0x4f, - 0xfd, 0xde, 0xfc, 0xc3, 0xc1, 0xc4, 0xf1, 0x9c, 0x01, 0x99, 0x10, 0x8f, 0x26, 0x7e, 0xd6, 0xc3, - 0x6d, 0x04, 0x8b, 0x48, 0x6d, 0x67, 0xe0, 0xfb, 0x83, 0x31, 0x39, 0x88, 0x89, 0x8e, 0x77, 0xc5, - 0x61, 0xb5, 0x4f, 0xb2, 0x5b, 0x64, 0x32, 0xa5, 0x62, 0x13, 0xff, 0x2d, 0xc1, 0x96, 0xe5, 0xbb, - 0xa4, 0x49, 0x9c, 0x31, 0x1d, 0x1e, 0x0d, 0x49, 0xff, 0xc2, 0x26, 0x7f, 0xcc, 0x49, 0x40, 0xd1, - 0x4f, 0xa0, 0x4c, 0x67, 0x7e, 0x8f, 0x6c, 0x4b, 0xbb, 0xd2, 0xfe, 0x9d, 0xc6, 0x7e, 0x3d, 0x21, - 0x20, 0x9f, 0x52, 0x3f, 0x65, 0x78, 0x9b, 0xd3, 0xf0, 0x73, 0x50, 0xc2, 0x35, 0xba, 0x0b, 0xd5, - 0xa6, 0xa1, 0x99, 0xdd, 0x66, 0xcb, 0x32, 0x3a, 0x1d, 0xb5, 0x80, 0x36, 0xa0, 0x62, 0xb6, 0xde, - 0x1a, 0xe1, 0x4a, 0x42, 0x9b, 0xb0, 0x6e, 0x1b, 0x9a, 0xce, 0x37, 0x8b, 0xf8, 0x1f, 0x09, 0x1e, - 0x5c, 0x4b, 0x1f, 0x4c, 0x7d, 0x2f, 0x20, 0xe8, 0x67, 0x50, 0x02, 0xea, 0xd0, 0x48, 0xd2, 0x97, - 0x2b, 0x25, 0x71, 0x4e, 0xbd, 0xc3, 0x08, 0x36, 0xe7, 0x61, 0x1b, 0x94, 0x70, 0x8d, 0xaa, 0x50, - 0xe6, 0x9a, 0xde, 0xa9, 0x05, 0xa6, 0xe0, 0xcc, 0x8a, 0x96, 0x12, 0x5a, 0x07, 0x45, 0x63, 0xfa, - 0xd4, 0x22, 0xaa, 0x40, 0x49, 0x37, 0x34, 0x5d, 0x95, 0x59, 0x90, 0xa9, 0x7c, 0xa7, 0x96, 0x18, - 0xdc, 0x6a, 0x77, 0xdf, 0xf3, 0xa5, 0x82, 0x4f, 0xa1, 0xf2, 0x86, 0x50, 0xc7, 0x75, 0xa8, 0x83, - 0xf6, 0x60, 0x63, 0x30, 0x9b, 0xf6, 0xdf, 0x3b, 0xae, 0x3b, 0x23, 0x41, 0x10, 0xea, 0x5c, 0xb7, - 0xab, 0x2c, 0xa6, 0xf1, 0x10, 0x83, 0x0c, 0x29, 0x9d, 0xc6, 0x90, 0x22, 0x87, 0xb0, 0x98, 0x80, - 0xe0, 0xff, 0x25, 0x28, 0xb1, 0x72, 0xd0, 0x1d, 0x28, 0x8e, 0x5c, 0x91, 0xa4, 0x38, 0x72, 0x19, - 0xb7, 0x37, 0xf2, 0xdc, 0x2c, 0x97, 0xc5, 0xa2, 0xf4, 0x5f, 0x47, 0x16, 0xc9, 0xa1, 0x45, 0x5b, - 0x59, 0x8b, 0x52, 0x7e, 0xa0, 0x67, 0x50, 0x99, 0x08, 0xed, 0xdb, 0xa5, 0x5d, 0x69, 0xbf, 0xda, - 0xb8, 0x9f, 0x24, 0x44, 0x75, 0xd9, 0x31, 0x0a, 0xbf, 0x4e, 0x38, 0x78, 0x66, 0xbd, 0xb6, 0xda, - 0xe7, 0x16, 0xbf, 0xd1, 0x5f, 0xdb, 0xa6, 0xd9, 0x3e, 0x37, 0x6c, 0x7e, 0xa3, 0x47, 0x9a, 0xa5, - 0xb7, 0x74, 0xad, 0xcb, 0x4c, 0x04, 0x58, 0x33, 0x0d, 0x4d, 0x37, 0x6c, 0x55, 0x66, 0xc0, 0x4e, - 0xf3, 0xac, 0xab, 0x33, 0x5a, 0x09, 0xff, 0x29, 0x41, 0xf9, 0x68, 0x3c, 0x0f, 0x28, 0x99, 0xa1, - 0x6f, 0x41, 0xf1, 0x7c, 0x97, 0x30, 0xcf, 0xe4, 0xfd, 0x6a, 0xe3, 0x61, 0x52, 0x87, 0xc0, 0x84, - 0x05, 0x04, 0x86, 0x47, 0x67, 0x57, 0x36, 0x07, 0xd7, 0x4e, 0x00, 0x16, 0x41, 0xa4, 0x82, 0x7c, - 0x41, 0xae, 0x84, 0x61, 0xec, 0x27, 0x7a, 0x0c, 0xca, 0xa5, 0x33, 0x9e, 0x93, 0xd0, 0xaa, 0x6a, - 0x43, 0xcd, 0xda, 0x61, 0xf3, 0xed, 0x17, 0xc5, 0x43, 0x09, 0x1f, 0x82, 0xca, 0x42, 0x2d, 0xef, - 0x83, 0x1f, 0x77, 0xdc, 0x23, 0x28, 0xb1, 0x83, 0xc2, 0x94, 0x79, 0xf4, 0x70, 0x17, 0xbf, 0x00, - 0x24, 0x24, 0x9e, 0xf8, 0x23, 0x2f, 0x1a, 0xa0, 0xdb, 0x71, 0xbf, 0x80, 0x7b, 0x82, 0x6b, 0x12, - 0xe7, 0x92, 0x44, 0xe4, 0xcc, 0xd5, 0x63, 0x3d, 0x86, 0xa5, 0xf4, 0x3d, 0x85, 0x72, 0x9f, 0x87, - 0xc5, 0x31, 0xf7, 0x72, 0x7c, 0xb3, 0x23, 0x0c, 0xfe, 0x4f, 0x82, 0xfb, 0x22, 0x78, 0xee, 0xd0, - 0xfe, 0x30, 0xce, 0xf3, 0x12, 0x14, 0x72, 0x49, 0x3c, 0x2a, 0x26, 0xeb, 0x71, 0x4e, 0x96, 0x14, - 0xa1, 0x6e, 0x30, 0xb4, 0xcd, 
0x49, 0x71, 0xa5, 0xc5, 0x55, 0x95, 0x26, 0xb5, 0xca, 0xb7, 0xd0, - 0xfa, 0x1c, 0x94, 0xf0, 0x90, 0x74, 0xa7, 0x55, 0xa0, 0x74, 0xd2, 0x6e, 0x59, 0x7c, 0x4c, 0x4d, - 0x43, 0x7b, 0x2b, 0x3a, 0xec, 0xec, 0x34, 0xec, 0x36, 0x19, 0x3f, 0x04, 0x38, 0x26, 0x34, 0xb2, - 0xf1, 0x5a, 0x47, 0xe0, 0xef, 0xa1, 0x1a, 0xee, 0x8b, 0xc2, 0xbf, 0x8a, 0x1a, 0x44, 0x12, 0xed, - 0xcf, 0x5f, 0xcb, 0x7a, 0xf4, 0x5a, 0xd6, 0x35, 0xef, 0x4a, 0x34, 0x09, 0x3e, 0x01, 0xe8, 0xac, - 0x48, 0xbd, 0xc8, 0x55, 0xbc, 0x39, 0xd7, 0x1e, 0x6c, 0xea, 0x64, 0x4c, 0x28, 0x59, 0xae, 0x74, - 0x17, 0x36, 0x84, 0xe7, 0xcb, 0x10, 0xff, 0x4a, 0xb0, 0x99, 0xbe, 0xc7, 0x1f, 0xa0, 0xdc, 0xf7, - 0x27, 0x13, 0xc7, 0x73, 0xc5, 0x4d, 0xee, 0x25, 0x3d, 0x4e, 0x5f, 0xe1, 0x11, 0x07, 0xda, 0x11, - 0x23, 0x3a, 0xa0, 0x98, 0x53, 0x91, 0x7c, 0x73, 0x45, 0x4f, 0xa0, 0x2c, 0x32, 0xa6, 0x6f, 0xac, - 0x0c, 0x72, 0xc7, 0xe8, 0xaa, 0x12, 0xbb, 0x25, 0xdd, 0x30, 0x0d, 0xf6, 0x26, 0x34, 0xfe, 0x5a, - 0x03, 0x78, 0x13, 0x0b, 0x43, 0xbf, 0xc3, 0xdd, 0xcc, 0xfb, 0x8d, 0xf0, 0xcd, 0xdf, 0x9b, 0xda, - 0xe7, 0xb7, 0xf8, 0x00, 0xe0, 0x02, 0x7a, 0x05, 0x95, 0x68, 0xb0, 0xd1, 0xd6, 0xb5, 0x12, 0x0c, - 0xf6, 0x39, 0xac, 0x7d, 0x9a, 0x4d, 0x95, 0x1c, 0x33, 0x5c, 0x40, 0xc7, 0x50, 0x4d, 0x8c, 0x38, - 0xca, 0x7b, 0x9e, 0x12, 0xb3, 0x5f, 0x5b, 0x72, 0x0c, 0x2e, 0xa0, 0x16, 0x6c, 0x24, 0xe7, 0x1d, - 0x7d, 0x96, 0x93, 0x29, 0xf9, 0x12, 0xac, 0x48, 0xd5, 0x8c, 0x35, 0xad, 0x2c, 0x2d, 0xef, 0x84, - 0x4c, 0x75, 0x66, 0x2c, 0x2a, 0x6c, 0x91, 0xa5, 0xa9, 0x76, 0x6f, 0x7a, 0x17, 0x70, 0xe1, 0x99, - 0x84, 0x0e, 0x41, 0x3e, 0x26, 0x14, 0xa5, 0xbe, 0x3d, 0x8b, 0x99, 0xac, 0x3d, 0xb8, 0x16, 0x8f, - 0x75, 0x7c, 0x07, 0x72, 0x27, 0xcb, 0x5c, 0x8c, 0xdc, 0x0a, 0x2b, 0x7e, 0x84, 0x35, 0x3e, 0x4e, - 0x68, 0x27, 0xc9, 0x4d, 0x8d, 0xd8, 0x0a, 0xfa, 0x2f, 0xa0, 0xf0, 0xc2, 0xb7, 0x73, 0xc6, 0x85, - 0x93, 0x77, 0x96, 0x0e, 0x52, 0x58, 0xf3, 0x4b, 0xa8, 0x74, 0x3c, 0x67, 0x1a, 0x0c, 0x7d, 0xba, - 0xd4, 0xbd, 0xa5, 0xe7, 0xbf, 0x7a, 0xfa, 0xdb, 0x93, 0xc1, 0x88, 0x0e, 0xe7, 0xbd, 0x7a, 0xdf, - 0x9f, 0x1c, 0x4c, 0xfc, 0x60, 0x7e, 0xe1, 0x1c, 0xf4, 0xc6, 0x4e, 0x40, 0x0f, 0x72, 0xfe, 0x0a, - 0xf6, 0xd6, 0xc2, 0xe0, 0x37, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xdd, 0x42, 0x6a, 0x28, - 0x0a, 0x00, 0x00, + // 1193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x72, 0xda, 0x56, + 0x14, 0x8e, 0x10, 0x18, 0x7c, 0x20, 0xb1, 0x72, 0xcd, 0xf8, 0x87, 0x7a, 0x52, 0x5b, 0x4d, 0x33, + 0xae, 0xd3, 0x80, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x2c, 0xd5, 0xc6, 0x26, 0xe0, 0x0a, 0x6c, + 0x8f, 0xbb, 0xf1, 0x5c, 0xe0, 0x06, 0x18, 0x40, 0xa2, 0xe8, 0xe2, 0x96, 0xe9, 0x74, 0x93, 0x6d, + 0x97, 0xdd, 0xf6, 0x3d, 0xba, 0xc8, 0x63, 0xf4, 0x05, 0xba, 0xe8, 0x74, 0xd3, 0x97, 0xe8, 0xdc, + 0x1f, 0xc9, 0x12, 0x16, 0xb6, 0xdb, 0x9d, 0x74, 0xce, 0x77, 0xbe, 0xf3, 0x9d, 0x73, 0x8f, 0xce, + 0x05, 0x78, 0x3c, 0x1c, 0x39, 0xd4, 0x69, 0x8c, 0x5f, 0x17, 0x06, 0xd8, 0xc6, 0x6d, 0x32, 0x20, + 0x36, 0x0d, 0x3c, 0xe6, 0xb9, 0x1b, 0xc1, 0x95, 0x25, 0xb7, 0xda, 0x76, 0x9c, 0x76, 0x9f, 0x14, + 0xfc, 0x40, 0x6c, 0x4f, 0x04, 0x2c, 0xf7, 0xce, 0xb4, 0x8b, 0x0c, 0x86, 0xd4, 0x73, 0xae, 0x49, + 0x27, 0x1e, 0x76, 0x0b, 0xd8, 0xb6, 0x1d, 0x8a, 0x69, 0xd7, 0xb1, 0x5d, 0xe1, 0xd5, 0x7f, 0x53, + 0x60, 0xa9, 0xe2, 0xb4, 0xc8, 0x01, 0xc1, 0x7d, 0xda, 0xd9, 0xeb, 0x90, 0x66, 0xcf, 0x22, 0xdf, + 0x8f, 0x89, 0x4b, 0xd1, 0x97, 0x90, 0x18, 0x8e, 0x9c, 0x06, 0x59, 0x51, 0xd6, 0x95, 0xcd, 0x07, + 0x3b, 0x9b, 0xf9, 0x80, 0xbc, 0xe8, 0x90, 0xfc, 0x31, 0xc3, 0x5b, 0x22, 0x4c, 0x7f, 
0x09, 0x09, + 0xfe, 0x8e, 0xd2, 0x90, 0x3c, 0xa9, 0x1c, 0x55, 0xaa, 0x67, 0x15, 0xed, 0x1e, 0x5a, 0x80, 0xf4, + 0x81, 0x59, 0x2c, 0xd7, 0x0f, 0x4a, 0x15, 0xb3, 0x56, 0xd3, 0x14, 0x94, 0x81, 0x54, 0xb9, 0x74, + 0x6a, 0xf2, 0xb7, 0x18, 0xba, 0x0f, 0xf3, 0x96, 0x59, 0x34, 0x84, 0x53, 0xd5, 0xdf, 0x2a, 0xb0, + 0x7c, 0x2d, 0x97, 0x3b, 0x74, 0x6c, 0x97, 0xa0, 0xaf, 0x20, 0xe1, 0x52, 0x4c, 0x3d, 0x7d, 0x1f, + 0xdc, 0xa8, 0x4f, 0xc4, 0xe4, 0x6b, 0x2c, 0xc0, 0x12, 0x71, 0xfa, 0x05, 0x24, 0xf8, 0x7b, 0x58, + 0x60, 0x1a, 0x92, 0x42, 0xe0, 0xb9, 0xa6, 0x30, 0x39, 0x27, 0x15, 0xef, 0x35, 0x86, 0xe6, 0x21, + 0x51, 0x64, 0x62, 0x35, 0x15, 0xa5, 0x20, 0x6e, 0x98, 0x45, 0x43, 0x8b, 0x33, 0x23, 0x93, 0x7c, + 0xae, 0x25, 0x18, 0xbc, 0x52, 0xad, 0x5f, 0x88, 0xd7, 0x39, 0xfd, 0x8d, 0x02, 0xa9, 0x57, 0x84, + 0xe2, 0x16, 0xa6, 0x18, 0x6d, 0x40, 0xa6, 0x3d, 0x1a, 0x36, 0x2f, 0x70, 0xab, 0x35, 0x22, 0xae, + 0xcb, 0x55, 0xcf, 0x5b, 0x69, 0x66, 0x2b, 0x0a, 0x13, 0xda, 0x86, 0x2c, 0x87, 0xb4, 0x31, 0x25, + 0x3f, 0xe0, 0x89, 0x0f, 0x8d, 0x71, 0x28, 0x62, 0xbe, 0x7d, 0xe1, 0xf2, 0x22, 0x36, 0x20, 0xd3, + 0xa1, 0x74, 0xe8, 0x23, 0x55, 0x41, 0xca, 0x6c, 0x12, 0xa2, 0xff, 0xad, 0x40, 0x9c, 0xb5, 0x03, + 0x3d, 0x80, 0x58, 0xb7, 0x25, 0xd3, 0xc6, 0xba, 0x2d, 0x16, 0xdb, 0xe8, 0xda, 0xad, 0xa9, 0x2c, + 0x69, 0x66, 0xf3, 0xe8, 0x3f, 0xf4, 0x5a, 0xac, 0xf2, 0x16, 0x2f, 0x4d, 0xb7, 0x38, 0xd4, 0x4f, + 0xb4, 0x0d, 0xa9, 0x81, 0xac, 0x76, 0x25, 0xbe, 0xae, 0x6c, 0xa6, 0x77, 0xb2, 0xc1, 0x00, 0xaf, + 0x13, 0x96, 0x8f, 0xd2, 0x8f, 0x22, 0x4f, 0x20, 0x03, 0xa9, 0x6f, 0xaa, 0xe5, 0x72, 0xf5, 0xcc, + 0xb4, 0xc4, 0x11, 0xec, 0x15, 0x2b, 0x46, 0xc9, 0x28, 0xd6, 0x4d, 0x2d, 0x86, 0x00, 0xe6, 0xca, + 0x66, 0xd1, 0x30, 0x2d, 0x4d, 0x65, 0xc0, 0xda, 0xc1, 0x49, 0xdd, 0x60, 0x61, 0x71, 0xfd, 0x17, + 0x05, 0x92, 0x7b, 0xfd, 0xb1, 0x4b, 0xc9, 0x08, 0x7d, 0x0c, 0x09, 0xdb, 0x69, 0x11, 0xd6, 0x65, + 0x75, 0x33, 0xbd, 0xf3, 0x28, 0xa8, 0x43, 0x62, 0x78, 0x01, 0xae, 0x69, 0xd3, 0xd1, 0xc4, 0x12, + 0xe0, 0xdc, 0x21, 0xc0, 0x95, 0x11, 0x69, 0xa0, 0xf6, 0xc8, 0x44, 0x36, 0x8c, 0x3d, 0xa2, 0x27, + 0x90, 0xb8, 0xc4, 0xfd, 0x31, 0xe1, 0xad, 0x4a, 0xef, 0x68, 0xd3, 0xed, 0xb0, 0x84, 0x7b, 0x37, + 0xf6, 0x42, 0xd1, 0x5f, 0x80, 0xc6, 0x4c, 0x25, 0xfb, 0xb5, 0xe3, 0x4f, 0xec, 0x63, 0x88, 0xb3, + 0x44, 0x9c, 0x32, 0x2a, 0x9c, 0x7b, 0xf5, 0x5d, 0x40, 0x52, 0xe2, 0xa1, 0xd3, 0xb5, 0xbd, 0xaf, + 0xf1, 0x6e, 0xb1, 0xef, 0xc3, 0xa2, 0x8c, 0x2d, 0x13, 0x7c, 0x49, 0xbc, 0xe0, 0xa9, 0xa3, 0xd7, + 0x0d, 0x1f, 0x16, 0xd2, 0xf7, 0x0c, 0x92, 0x4d, 0x61, 0x96, 0x69, 0x16, 0x23, 0xfa, 0x66, 0x79, + 0x18, 0xfd, 0x4f, 0x05, 0xb2, 0xd2, 0x78, 0x86, 0x69, 0xb3, 0xe3, 0xf3, 0x7c, 0x0e, 0x09, 0x72, + 0x49, 0x6c, 0x2a, 0xbf, 0xcc, 0x27, 0x11, 0x2c, 0xa1, 0x80, 0xbc, 0xc9, 0xd0, 0x96, 0x08, 0xf2, + 0x2b, 0x8d, 0xdd, 0x54, 0x69, 0x50, 0xab, 0x7a, 0x07, 0xad, 0x9f, 0x40, 0x82, 0x27, 0x09, 0x4f, + 0x5a, 0x0a, 0xe2, 0x87, 0xd5, 0x52, 0x45, 0x53, 0xd8, 0x47, 0x5c, 0x36, 0x8b, 0xa7, 0x72, 0xc2, + 0x4e, 0x8e, 0xf9, 0xb4, 0xa9, 0xfa, 0x01, 0xa4, 0x8e, 0xc8, 0xe4, 0x94, 0x9d, 0x6a, 0xc4, 0x3c, + 0x6c, 0x85, 0xe7, 0x21, 0x9b, 0x17, 0xab, 0x36, 0xef, 0xed, 0xe1, 0x7c, 0xd1, 0x9e, 0xc8, 0x99, + 0xd0, 0x1f, 0x01, 0xec, 0x13, 0xea, 0x1d, 0xc8, 0x35, 0x2e, 0xfd, 0x53, 0x48, 0x73, 0xbf, 0x6c, + 0xa1, 0x4f, 0xad, 0xdc, 0x4e, 0x7d, 0x08, 0x50, 0xbb, 0x81, 0xfa, 0x3f, 0xc9, 0xdc, 0x80, 0xfb, + 0x06, 0xe9, 0x13, 0x4a, 0x66, 0x2b, 0x5d, 0x87, 0x8c, 0x3c, 0xbd, 0x59, 0x88, 0xdf, 0x15, 0xb8, + 0x1f, 0x9e, 0x88, 0xcf, 0x20, 0xd9, 0x74, 0x06, 0x03, 0x6c, 0xb7, 0xe4, 0x4c, 0x6c, 0x04, 0x4f, + 0x2b, 0x3c, 
0x0c, 0x7b, 0x02, 0x68, 0x79, 0x11, 0x5e, 0x82, 0x58, 0x44, 0x45, 0xea, 0xed, 0x15, + 0x3d, 0x85, 0xa4, 0x64, 0x0c, 0x9f, 0x7d, 0x12, 0xd4, 0x9a, 0x59, 0xd7, 0x14, 0x76, 0xde, 0x86, + 0x59, 0x36, 0xd9, 0x76, 0xd1, 0xff, 0x51, 0x20, 0x75, 0x3c, 0x72, 0x86, 0x8e, 0x8b, 0xfb, 0x68, + 0x3b, 0x3c, 0xc6, 0xb9, 0xa0, 0x64, 0x0f, 0xf4, 0x7f, 0x46, 0xf7, 0x39, 0xcc, 0xf7, 0xc8, 0xe4, + 0x22, 0x5c, 0x41, 0x00, 0xea, 0x4d, 0x9c, 0x95, 0xea, 0xc9, 0x27, 0xbd, 0x1e, 0x39, 0xbe, 0x6c, + 0xff, 0x99, 0xf5, 0x8b, 0x4a, 0xd5, 0x30, 0x35, 0x85, 0xdd, 0xac, 0xa2, 0x0e, 0x61, 0xe0, 0x77, + 0x29, 0x73, 0x9f, 0x16, 0xcb, 0x27, 0xec, 0xc6, 0xd2, 0x20, 0x23, 0xfd, 0xc2, 0x12, 0xdf, 0x79, + 0x9b, 0x04, 0x78, 0xe5, 0xe7, 0x45, 0x3f, 0xc2, 0xc2, 0xd4, 0xbd, 0x89, 0xf4, 0xdb, 0x2f, 0xfd, + 0xdc, 0x7b, 0x77, 0xb8, 0x78, 0xf5, 0xb5, 0x37, 0x7f, 0xfc, 0xf5, 0x6b, 0x6c, 0x09, 0x65, 0x0b, + 0x97, 0xcf, 0x0b, 0xac, 0x0b, 0x85, 0x0e, 0x47, 0x35, 0x79, 0x9a, 0x73, 0x48, 0x79, 0xcb, 0x12, + 0x2d, 0x5d, 0x3b, 0x4c, 0x93, 0xfd, 0x9a, 0xc9, 0xad, 0x4d, 0xa7, 0x09, 0xae, 0x2e, 0x7d, 0x99, + 0xf3, 0x3f, 0x44, 0x0b, 0x3e, 0x3f, 0xbb, 0x93, 0xc6, 0x2e, 0xda, 0x87, 0x74, 0x60, 0x9b, 0xa2, + 0xa8, 0x9b, 0x20, 0xb0, 0x66, 0x73, 0x33, 0xb2, 0xeb, 0xf7, 0x50, 0x09, 0x32, 0xc1, 0xd5, 0x8a, + 0xde, 0x8d, 0x60, 0x0a, 0x2e, 0xdd, 0x1b, 0xa8, 0x1a, 0xbe, 0xa6, 0x1b, 0x2b, 0x8e, 0xca, 0x10, + 0x2a, 0x3a, 0xc7, 0x8b, 0xce, 0x22, 0xc4, 0x8a, 0x96, 0x9b, 0xce, 0xab, 0xbb, 0xec, 0xcb, 0xe5, + 0x5f, 0xd7, 0xcc, 0x24, 0xeb, 0xb7, 0x2d, 0x67, 0xfd, 0xde, 0xb6, 0x82, 0xbe, 0x05, 0x75, 0x9f, + 0x50, 0x14, 0xfa, 0x01, 0x70, 0xb5, 0xce, 0x72, 0xcb, 0xd7, 0xec, 0x32, 0x76, 0x95, 0x2b, 0x5c, + 0x44, 0x0f, 0x99, 0x42, 0x76, 0xe5, 0x17, 0x7e, 0xea, 0x91, 0xc9, 0x17, 0x5b, 0x5b, 0x3f, 0xa3, + 0x1a, 0xa8, 0xb5, 0x69, 0xca, 0xab, 0x35, 0x36, 0xb3, 0x7b, 0x72, 0x90, 0x72, 0xd7, 0x19, 0x77, + 0x95, 0x2d, 0x74, 0x0a, 0x73, 0x62, 0x7d, 0xa1, 0xd5, 0x20, 0x6f, 0x68, 0xa5, 0xcd, 0xa4, 0x96, + 0x62, 0xb7, 0x22, 0xc4, 0x7e, 0x0d, 0x09, 0xd1, 0xc6, 0x95, 0x88, 0xbd, 0x25, 0x58, 0x57, 0x67, + 0x6e, 0x34, 0xde, 0xc1, 0x63, 0x48, 0xd5, 0x6c, 0x3c, 0x74, 0x3b, 0x0e, 0x9d, 0x79, 0x16, 0xb3, + 0x84, 0x65, 0xb9, 0xb0, 0x07, 0x28, 0xc3, 0x84, 0xb9, 0x92, 0xe5, 0xe5, 0xb3, 0xef, 0x9e, 0xb6, + 0xbb, 0xb4, 0x33, 0x6e, 0xe4, 0x9b, 0xce, 0xa0, 0x30, 0x70, 0xdc, 0x71, 0x0f, 0x17, 0x1a, 0x7d, + 0xec, 0xd2, 0x42, 0xc4, 0x9f, 0x8b, 0xc6, 0x1c, 0x37, 0x7e, 0xf4, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x57, 0x7b, 0x42, 0x0c, 0x7a, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1165,6 +1335,44 @@ type ManagementServer interface { Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) } +// UnimplementedManagementServer can be embedded to have forward compatible implementations. 
+type UnimplementedManagementServer struct { +} + +func (*UnimplementedManagementServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") +} +func (*UnimplementedManagementServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") +} +func (*UnimplementedManagementServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") +} +func (*UnimplementedManagementServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") +} +func (*UnimplementedManagementServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (*UnimplementedManagementServer) ClusterWatch(req *empty.Empty, srv Management_ClusterWatchServer) error { + return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") +} +func (*UnimplementedManagementServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedManagementServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (*UnimplementedManagementServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedManagementServer) Watch(req *WatchRequest, srv Management_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (*UnimplementedManagementServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} + func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { s.RegisterService(&_Management_serviceDesc, srv) } diff --git a/protobuf/management/management.pb.gw.go b/protobuf/management/management.pb.gw.go new file mode 100644 index 0000000..5430218 --- /dev/null +++ b/protobuf/management/management.pb.gw.go @@ -0,0 +1,379 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/management/management.proto + +/* +Package management is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package management + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_Management_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Management_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq NodeHealthCheckRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Management_NodeHealthCheck_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Get_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Set_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != 
nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Management_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterManagementHandlerFromEndpoint is same as RegisterManagementHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterManagementHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterManagementHandler(ctx, mux, conn) +} + +// RegisterManagementHandler registers the http handlers for service Management to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterManagementHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterManagementHandlerClient(ctx, mux, NewManagementClient(conn)) +} + +// RegisterManagementHandlerClient registers the http handlers for service Management +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ManagementClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ManagementClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ManagementClient" to call the correct interceptors. 
+func RegisterManagementHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ManagementClient) error { + + mux.Handle("GET", pattern_Management_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Management_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Set_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Management_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Management_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Management_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Management_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Management_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Management_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Management_NodeHealthCheck_0 = runtime.ForwardResponseMessage + + forward_Management_NodeInfo_0 = runtime.ForwardResponseMessage + + forward_Management_ClusterInfo_0 = runtime.ForwardResponseMessage + + forward_Management_Get_0 = runtime.ForwardResponseMessage + + forward_Management_Set_0 = runtime.ForwardResponseMessage + + forward_Management_Delete_0 = runtime.ForwardResponseMessage + + forward_Management_Snapshot_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto index 2a7d736..621603b 100644 --- a/protobuf/management/management.proto +++ b/protobuf/management/management.proto @@ -16,51 +16,84 @@ syntax = "proto3"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; package management; option go_package = "github.com/mosuka/blast/protobuf/management"; service Management { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { + option (google.api.http) = { + get: "/v1/node/healthcheck" + }; + } + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { + option (google.api.http) = { + get: "/v1/node/status" + }; + } rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { + option (google.api.http) = { + get: "/v1/cluster/status" + }; + } rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - rpc Get (GetRequest) returns (GetResponse) {} - rpc Set (SetRequest) returns (google.protobuf.Empty) {} - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) {} + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/data/{key=**}" + }; + } + rpc Set (SetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/data/{key=**}" + body: "*" + }; + } + rpc Delete 
(DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/data/{key=**}" + }; + } rpc Watch (WatchRequest) returns (stream WatchResponse) {} - rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {} + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/snapshot" + }; + } } message NodeHealthCheckRequest { enum Probe { - HEALTHINESS = 0; - LIVENESS = 1; - READINESS = 2; + UNKNOWN = 0; + HEALTHINESS = 1; + LIVENESS = 2; + READINESS = 3; } Probe probe = 1; } message NodeHealthCheckResponse { enum State { - HEALTHY = 0; - UNHEALTHY = 1; - ALIVE = 2; - DEAD = 3; - READY = 4; - NOT_READY = 5; + UNKNOWN = 0; + HEALTHY = 1; + UNHEALTHY = 2; + ALIVE = 3; + DEAD = 4; + READY = 5; + NOT_READY = 6; } State state = 1; } message Metadata { string grpc_address = 1; - string http_address = 2; + string grpc_gateway_address = 2; + string http_address = 3; } message Node { @@ -109,6 +142,11 @@ message ClusterWatchResponse { Cluster cluster = 3; } +message KeyValue { + string key = 1; + google.protobuf.Any value = 2; +} + message GetRequest { string key = 1; } @@ -140,3 +178,16 @@ message WatchResponse { string key = 2; google.protobuf.Any value = 3; } + +message Proposal { + enum Event { + UNKNOWN = 0; + SET_NODE = 1; + DELETE_NODE = 2; + SET_VALUE = 3; + DELETE_VALUE = 4; + } + Event event = 1; + Node node = 2; + KeyValue key_value = 3; +}
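
The generated `Register*HandlerFromEndpoint` helpers above are the entry points for serving the new REST API: they dial the gRPC endpoint and mount the HTTP routes (`/v1/documents/{id}`, `/v1/search`, `/v1/node/healthcheck`, ...) on a grpc-gateway `ServeMux`. A minimal sketch of wiring up the Index gateway is shown below; the listen addresses and dial options are illustrative only (not the servers' actual startup code), while the import paths match the `go_package` option and the gateway imports in this change.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	"github.com/mosuka/blast/protobuf/index"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The gateway mux translates REST calls (e.g. GET /v1/documents/{id},
	// POST /v1/search) into gRPC calls against the Index service.
	mux := runtime.NewServeMux()

	// Dial the gRPC endpoint and register the generated Index handlers.
	// ":5000" and ":6000" mirror the example addresses used elsewhere in
	// this change; adjust them to your own configuration.
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := index.RegisterIndexHandlerFromEndpoint(ctx, mux, ":5000", opts); err != nil {
		log.Fatal(err)
	}

	// Serve the REST gateway over plain HTTP.
	log.Fatal(http.ListenAndServe(":6000", mux))
}
```

The same pattern applies to the Management service via `RegisterManagementHandlerFromEndpoint`, which exposes the `/v1/data/{key}`, `/v1/cluster/status`, and `/v1/snapshot` routes defined in management.proto.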