From a4acc0f5608a673c2f9def6119acf883252bc3ef Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 16:57:57 +0900 Subject: [PATCH 1/2] Delete the experimentally implemented feature for distributed search --- .gitignore | 20 +- CHANGES.md | 144 +- Dockerfile | 61 +- Makefile | 101 +- README.md | 1281 +++----- builtin/config_bleve.go | 5 + builtins/config_badger.go | 21 - builtins/config_bleve.go | 19 - client/grpc_client.go | 218 ++ cmd/blast/dispatcher_delete.go | 125 - cmd/blast/dispatcher_get.go | 64 - cmd/blast/dispatcher_index.go | 273 -- cmd/blast/dispatcher_node_health.go | 79 - cmd/blast/dispatcher_search.go | 119 - cmd/blast/dispatcher_start.go | 98 - cmd/blast/indexer_cluster_info.go | 56 - cmd/blast/indexer_cluster_leave.go | 68 - cmd/blast/indexer_cluster_watch.go | 83 - cmd/blast/indexer_delete.go | 125 - cmd/blast/indexer_get.go | 64 - cmd/blast/indexer_index.go | 272 -- cmd/blast/indexer_node_health.go | 79 - cmd/blast/indexer_node_info.go | 57 - cmd/blast/indexer_search.go | 119 - cmd/blast/indexer_snapshot.go | 56 - cmd/blast/indexer_start.go | 136 - cmd/blast/main.go | 1010 ------ cmd/blast/manager_cluster_info.go | 55 - cmd/blast/manager_cluster_leave.go | 63 - cmd/blast/manager_cluster_watch.go | 84 - cmd/blast/manager_delete.go | 64 - cmd/blast/manager_get.go | 60 - cmd/blast/manager_node_health.go | 79 - cmd/blast/manager_node_info.go | 55 - cmd/blast/manager_set.go | 92 - cmd/blast/manager_snapshot.go | 55 - cmd/blast/manager_start.go | 134 - cmd/blast/manager_watch.go | 74 - cmd/bulk_delete.go | 129 + cmd/bulk_index.go | 135 + cmd/cluster.go | 90 + cmd/delete.go | 89 + cmd/get.go | 99 + cmd/healthcheck.go | 100 + cmd/join.go | 104 + cmd/leave.go | 89 + cmd/mapping.go | 84 + cmd/metrics.go | 84 + cmd/node.go | 90 + cmd/root.go | 17 + cmd/search.go | 101 + cmd/set.go | 94 + cmd/snapshot.go | 81 + cmd/start.go | 211 ++ cmd/variables.go | 22 + cmd/version.go | 24 + cmd/watch.go | 157 + dispatcher/grpc_client.go | 118 - 
dispatcher/grpc_gateway.go | 353 --- dispatcher/grpc_server.go | 94 - dispatcher/grpc_service.go | 974 ------ dispatcher/http_handler.go | 79 - dispatcher/http_server.go | 69 - dispatcher/server.go | 151 - dispatcher/server_test.go | 610 ---- docker-compose.yml | 221 -- docker-entrypoint.sh | 19 - errors/errors.go | 26 +- etc/blast.yaml | 16 + .../blast_mapping.json | 6 +- example/geo_doc_2.json | 23 - example/geo_doc_3.json | 23 - example/geo_doc_4.json | 23 - example/geo_doc_5.json | 23 - example/geo_doc_6.json | 23 - example/wiki_bulk_delete.txt | 36 - example/wiki_bulk_index.jsonl | 36 - example/wiki_doc_arwiki_1.json | 9 - example/wiki_doc_bgwiki_1.json | 9 - example/wiki_doc_cawiki_1.json | 9 - example/wiki_doc_cswiki_1.json | 9 - example/wiki_doc_dawiki_1.json | 9 - example/wiki_doc_dewiki_1.json | 9 - example/wiki_doc_elwiki_1.json | 9 - example/wiki_doc_enwiki_1.json | 9 - example/wiki_doc_eswiki_1.json | 9 - example/wiki_doc_fawiki_1.json | 9 - example/wiki_doc_fiwiki_1.json | 9 - example/wiki_doc_frwiki_1.json | 9 - example/wiki_doc_gawiki_1.json | 9 - example/wiki_doc_glwiki_1.json | 9 - example/wiki_doc_guwiki_1.json | 9 - example/wiki_doc_hiwiki_1.json | 9 - example/wiki_doc_huwiki_1.json | 9 - example/wiki_doc_hywiki_1.json | 9 - example/wiki_doc_idwiki_1.json | 9 - example/wiki_doc_itwiki_1.json | 9 - example/wiki_doc_jawiki_1.json | 9 - example/wiki_doc_knwiki_1.json | 9 - example/wiki_doc_kowiki_1.json | 9 - example/wiki_doc_mlwiki_1.json | 9 - example/wiki_doc_nlwiki_1.json | 9 - example/wiki_doc_nowiki_1.json | 9 - example/wiki_doc_pswiki_1.json | 9 - example/wiki_doc_ptwiki_1.json | 9 - example/wiki_doc_rowiki_1.json | 9 - example/wiki_doc_ruwiki_1.json | 9 - example/wiki_doc_svwiki_1.json | 9 - example/wiki_doc_tawiki_1.json | 9 - example/wiki_doc_tewiki_1.json | 9 - example/wiki_doc_thwiki_1.json | 9 - example/wiki_doc_trwiki_1.json | 9 - example/wiki_doc_zhwiki_1.json | 9 - examples/example_bulk_delete.txt | 11 + 
examples/example_bulk_index.json | 11 + examples/example_doc_1.json | 8 + examples/example_mapping.json | 103 + .../example_search_request.json | 0 .../example_search_request_prefix.json | 0 .../example_search_request_simple.json | 0 examples/geo_example_bulk_index.json | 6 + .../geo_example_doc_1.json | 5 +- .../geo_example_mapping.json | 22 +- .../geo_example_search_request.json | 0 .../multiple_type_example_bulk_index.json | 36 + .../multiple_type_example_mapping.json | 72 +- go.mod | 54 +- go.sum | 249 +- hashutils/hashutils.go | 32 - http/metric.go | 100 - http/response.go | 44 - indexer/grpc_client.go | 150 - indexer/grpc_gateway.go | 376 --- indexer/grpc_server.go | 94 - indexer/grpc_service.go | 1018 ------ indexer/http_handler.go | 79 - indexer/http_server.go | 69 - indexer/index.go | 290 -- indexer/raft_fsm.go | 363 --- indexer/raft_server.go | 688 ---- indexer/server.go | 362 --- indexer/server_test.go | 2177 ------------- logutils/logger.go => log/log.go | 63 +- logutils/grpc_logger.go | 80 - logutils/http_logger.go | 90 - main.go | 15 + manager/grpc_client.go | 156 - manager/grpc_gateway.go | 172 - manager/grpc_server.go | 114 - manager/grpc_service.go | 714 ----- manager/http_handler.go | 79 - manager/http_server.go | 69 - manager/raft_fsm.go | 269 -- manager/raft_fsm_test.go | 552 ---- manager/raft_server.go | 641 ---- manager/server.go | 232 -- manager/server_test.go | 2774 ----------------- .../indexutils.go => mapping/mapping.go | 26 +- maputils/error.go | 21 - maputils/maputils.go | 302 -- maputils/maputils_test.go | 679 ---- marshaler/marshaler.go | 186 ++ marshaler/util.go | 69 + marshaler/util_test.go | 109 + metric/metric.go | 895 ++++++ protobuf/distribute/distribute.pb.go | 945 ------ protobuf/distribute/distribute.pb.gw.go | 443 --- protobuf/distribute/distribute.proto | 135 - protobuf/distribute/distribute.swagger.json | 362 --- protobuf/index.pb.go | 1913 ++++++++++++ protobuf/index.pb.gw.go | 1276 ++++++++ protobuf/index.proto | 223 ++ 
protobuf/index/index.go | 75 - protobuf/index/index.pb.go | 2051 ------------ protobuf/index/index.pb.gw.go | 510 --- protobuf/index/index.proto | 241 -- protobuf/index/index.swagger.json | 557 ---- protobuf/management/management.pb.go | 1649 ---------- protobuf/management/management.pb.gw.go | 379 --- protobuf/management/management.proto | 203 -- protobuf/management/management.swagger.json | 409 --- protobuf/util.go | 57 - protobuf/util_test.go | 307 -- registry/type.go | 56 +- server/grpc_gateway.go | 129 + server/grpc_server.go | 129 + server/grpc_service.go | 540 ++++ server/raft_fsm.go | 400 +++ server/raft_fsm_test.go | 743 +++++ server/raft_server.go | 857 +++++ server/raft_server_test.go | 1536 +++++++++ sortutils/sort.go | 49 - storage/index.go | 269 ++ storage/index_test.go | 341 ++ strutils/strutils.go | 49 - testutils/testutils.go | 43 - util/temp.go | 29 + version/version.go | 14 - 198 files changed, 12670 insertions(+), 29590 deletions(-) create mode 100644 builtin/config_bleve.go delete mode 100644 builtins/config_badger.go delete mode 100644 builtins/config_bleve.go create mode 100644 client/grpc_client.go delete mode 100644 cmd/blast/dispatcher_delete.go delete mode 100644 cmd/blast/dispatcher_get.go delete mode 100644 cmd/blast/dispatcher_index.go delete mode 100644 cmd/blast/dispatcher_node_health.go delete mode 100644 cmd/blast/dispatcher_search.go delete mode 100644 cmd/blast/dispatcher_start.go delete mode 100644 cmd/blast/indexer_cluster_info.go delete mode 100644 cmd/blast/indexer_cluster_leave.go delete mode 100644 cmd/blast/indexer_cluster_watch.go delete mode 100644 cmd/blast/indexer_delete.go delete mode 100644 cmd/blast/indexer_get.go delete mode 100644 cmd/blast/indexer_index.go delete mode 100644 cmd/blast/indexer_node_health.go delete mode 100644 cmd/blast/indexer_node_info.go delete mode 100644 cmd/blast/indexer_search.go delete mode 100644 cmd/blast/indexer_snapshot.go delete mode 100644 cmd/blast/indexer_start.go delete mode 
100644 cmd/blast/main.go delete mode 100644 cmd/blast/manager_cluster_info.go delete mode 100644 cmd/blast/manager_cluster_leave.go delete mode 100644 cmd/blast/manager_cluster_watch.go delete mode 100644 cmd/blast/manager_delete.go delete mode 100644 cmd/blast/manager_get.go delete mode 100644 cmd/blast/manager_node_health.go delete mode 100644 cmd/blast/manager_node_info.go delete mode 100644 cmd/blast/manager_set.go delete mode 100644 cmd/blast/manager_snapshot.go delete mode 100644 cmd/blast/manager_start.go delete mode 100644 cmd/blast/manager_watch.go create mode 100644 cmd/bulk_delete.go create mode 100644 cmd/bulk_index.go create mode 100644 cmd/cluster.go create mode 100644 cmd/delete.go create mode 100644 cmd/get.go create mode 100644 cmd/healthcheck.go create mode 100644 cmd/join.go create mode 100644 cmd/leave.go create mode 100644 cmd/mapping.go create mode 100644 cmd/metrics.go create mode 100644 cmd/node.go create mode 100644 cmd/root.go create mode 100644 cmd/search.go create mode 100644 cmd/set.go create mode 100644 cmd/snapshot.go create mode 100644 cmd/start.go create mode 100644 cmd/variables.go create mode 100644 cmd/version.go create mode 100644 cmd/watch.go delete mode 100644 dispatcher/grpc_client.go delete mode 100644 dispatcher/grpc_gateway.go delete mode 100644 dispatcher/grpc_server.go delete mode 100644 dispatcher/grpc_service.go delete mode 100644 dispatcher/http_handler.go delete mode 100644 dispatcher/http_server.go delete mode 100644 dispatcher/server.go delete mode 100644 dispatcher/server_test.go delete mode 100644 docker-compose.yml delete mode 100755 docker-entrypoint.sh create mode 100644 etc/blast.yaml rename example/enwiki_index_mapping.json => etc/blast_mapping.json (97%) delete mode 100644 example/geo_doc_2.json delete mode 100644 example/geo_doc_3.json delete mode 100644 example/geo_doc_4.json delete mode 100644 example/geo_doc_5.json delete mode 100644 example/geo_doc_6.json delete mode 100644 example/wiki_bulk_delete.txt 
delete mode 100644 example/wiki_bulk_index.jsonl delete mode 100644 example/wiki_doc_arwiki_1.json delete mode 100644 example/wiki_doc_bgwiki_1.json delete mode 100644 example/wiki_doc_cawiki_1.json delete mode 100644 example/wiki_doc_cswiki_1.json delete mode 100644 example/wiki_doc_dawiki_1.json delete mode 100644 example/wiki_doc_dewiki_1.json delete mode 100644 example/wiki_doc_elwiki_1.json delete mode 100644 example/wiki_doc_enwiki_1.json delete mode 100644 example/wiki_doc_eswiki_1.json delete mode 100644 example/wiki_doc_fawiki_1.json delete mode 100644 example/wiki_doc_fiwiki_1.json delete mode 100644 example/wiki_doc_frwiki_1.json delete mode 100644 example/wiki_doc_gawiki_1.json delete mode 100644 example/wiki_doc_glwiki_1.json delete mode 100644 example/wiki_doc_guwiki_1.json delete mode 100644 example/wiki_doc_hiwiki_1.json delete mode 100644 example/wiki_doc_huwiki_1.json delete mode 100644 example/wiki_doc_hywiki_1.json delete mode 100644 example/wiki_doc_idwiki_1.json delete mode 100644 example/wiki_doc_itwiki_1.json delete mode 100644 example/wiki_doc_jawiki_1.json delete mode 100644 example/wiki_doc_knwiki_1.json delete mode 100644 example/wiki_doc_kowiki_1.json delete mode 100644 example/wiki_doc_mlwiki_1.json delete mode 100644 example/wiki_doc_nlwiki_1.json delete mode 100644 example/wiki_doc_nowiki_1.json delete mode 100644 example/wiki_doc_pswiki_1.json delete mode 100644 example/wiki_doc_ptwiki_1.json delete mode 100644 example/wiki_doc_rowiki_1.json delete mode 100644 example/wiki_doc_ruwiki_1.json delete mode 100644 example/wiki_doc_svwiki_1.json delete mode 100644 example/wiki_doc_tawiki_1.json delete mode 100644 example/wiki_doc_tewiki_1.json delete mode 100644 example/wiki_doc_thwiki_1.json delete mode 100644 example/wiki_doc_trwiki_1.json delete mode 100644 example/wiki_doc_zhwiki_1.json create mode 100644 examples/example_bulk_delete.txt create mode 100644 examples/example_bulk_index.json create mode 100644 examples/example_doc_1.json 
create mode 100644 examples/example_mapping.json rename example/wiki_search_request.json => examples/example_search_request.json (100%) rename example/wiki_search_request_prefix.json => examples/example_search_request_prefix.json (100%) rename example/wiki_search_request_simple.json => examples/example_search_request_simple.json (100%) create mode 100644 examples/geo_example_bulk_index.json rename example/geo_doc_1.json => examples/geo_example_doc_1.json (89%) rename example/geo_index_mapping.json => examples/geo_example_mapping.json (60%) rename example/geo_search_request.json => examples/geo_example_search_request.json (100%) create mode 100644 examples/multiple_type_example_bulk_index.json rename example/wiki_index_mapping.json => examples/multiple_type_example_mapping.json (99%) delete mode 100644 hashutils/hashutils.go delete mode 100644 http/metric.go delete mode 100644 http/response.go delete mode 100644 indexer/grpc_client.go delete mode 100644 indexer/grpc_gateway.go delete mode 100644 indexer/grpc_server.go delete mode 100644 indexer/grpc_service.go delete mode 100644 indexer/http_handler.go delete mode 100644 indexer/http_server.go delete mode 100644 indexer/index.go delete mode 100644 indexer/raft_fsm.go delete mode 100644 indexer/raft_server.go delete mode 100644 indexer/server.go delete mode 100644 indexer/server_test.go rename logutils/logger.go => log/log.go (54%) delete mode 100644 logutils/grpc_logger.go delete mode 100644 logutils/http_logger.go create mode 100644 main.go delete mode 100644 manager/grpc_client.go delete mode 100644 manager/grpc_gateway.go delete mode 100644 manager/grpc_server.go delete mode 100644 manager/grpc_service.go delete mode 100644 manager/http_handler.go delete mode 100644 manager/http_server.go delete mode 100644 manager/raft_fsm.go delete mode 100644 manager/raft_fsm_test.go delete mode 100644 manager/raft_server.go delete mode 100644 manager/server.go delete mode 100644 manager/server_test.go rename 
indexutils/indexutils.go => mapping/mapping.go (63%) delete mode 100644 maputils/error.go delete mode 100644 maputils/maputils.go delete mode 100644 maputils/maputils_test.go create mode 100644 marshaler/marshaler.go create mode 100644 marshaler/util.go create mode 100644 marshaler/util_test.go create mode 100644 metric/metric.go delete mode 100644 protobuf/distribute/distribute.pb.go delete mode 100644 protobuf/distribute/distribute.pb.gw.go delete mode 100644 protobuf/distribute/distribute.proto delete mode 100644 protobuf/distribute/distribute.swagger.json create mode 100644 protobuf/index.pb.go create mode 100644 protobuf/index.pb.gw.go create mode 100644 protobuf/index.proto delete mode 100644 protobuf/index/index.go delete mode 100644 protobuf/index/index.pb.go delete mode 100644 protobuf/index/index.pb.gw.go delete mode 100644 protobuf/index/index.proto delete mode 100644 protobuf/index/index.swagger.json delete mode 100644 protobuf/management/management.pb.go delete mode 100644 protobuf/management/management.pb.gw.go delete mode 100644 protobuf/management/management.proto delete mode 100644 protobuf/management/management.swagger.json delete mode 100644 protobuf/util.go delete mode 100644 protobuf/util_test.go create mode 100644 server/grpc_gateway.go create mode 100644 server/grpc_server.go create mode 100644 server/grpc_service.go create mode 100644 server/raft_fsm.go create mode 100644 server/raft_fsm_test.go create mode 100644 server/raft_server.go create mode 100644 server/raft_server_test.go delete mode 100644 sortutils/sort.go create mode 100644 storage/index.go create mode 100644 storage/index_test.go delete mode 100644 strutils/strutils.go delete mode 100644 testutils/testutils.go create mode 100644 util/temp.go diff --git a/.gitignore b/.gitignore index ba765bc..ee0c04c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,29 +1,15 @@ -# Copyright (c) 2017 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not 
use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - .DS_Store -# Eclipse .classpath .project -# Gogland .idea/ -# Blast bin/ dist/ +*.pem +*.csr + cover.out cover.html diff --git a/CHANGES.md b/CHANGES.md index 913b3ab..37860d7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,126 +5,42 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [Unreleased] +## [v0.3.1] 2020-04-01 -### Added +- Update protobuf #33 @mosuka -- Add coverage to Makefile #114 -- Docker compose #119 +## [v0.3.0] 2020-03-31 -### Changed +- Add health check endpoints #32 @mosuka +- Add some metrics #31 @mosuka +- Allow CLI options to be read from the configuration file #29 @mosuka +- Fix gateway bug #26 @mosuka +- Support TLS #25 @mosuka +- Add keepalive options #24 @mosuka +- Improve cluster watching #22 @mosuka +- Refactoring #21 @mosuka +- Update Makefile #20 @mosuka -- Bump Bleve version to v0.8.1 #117 +## [v0.2.0] 2020-03-19 +- Add join endpoint #19 @mosuka +- Add leave endpoint #18 @mosuka +- Add snapshot endpoint #17 @mosuka +- Disable raft-badgerdb logging #16 @mosuka +- Migrate to grpc-gateway #15 @mosuka +- Add metrics command #14 @mosuka +- Use raft-badger #13 @mosuka +- Refactoring #12 @mosuka +- Refactoring #11 @mosuka +- Refactoring #10 @mosuka +- Upgrade Badger #9 @mosuka +- Upgrade Raft #8 @mosuka +- Refactoring #7 @mosuka -## [v0.8.1] +## [v0.1.1] 2019-11-05 -### Added +- Fix bugs in defer #5 @mosuka -### 
Changed +## [v0.1.0] 2019-03-30 -- Update go version and dependencies #109 - - -## [v0.8.0] - -### Added -- Add swagger specification experimentaly #107 - -### Changed - -- New CLI #82 -- Split protobuf into components #84 -- Change subcommands #85 -- Update protobuf #86 -- Change protobuf #87 -- Change the cluster watching method #90 -- Change cluster watch command for manager #92 -- Change node state to enum from string #93 -- Change node info structure #94 -- Change protobuf for indexer and dispatcher #95 -- Change server arguments #96 -- Change index protobuf #97 -- Use protobuf document #98 -- Change node state to Node_SHUTDOWN in a error #99 -- Fix a bug for waiting to receive an indexer cluster updates from the stream #100 -- Migrate to grpc-gateway #105 - - -## [v0.7.1] - 2019-07-18 - -### Added - -- Add raft-badger #69 -- Add raft-storage-type flag #73 -- Add gRPC access logger #74 - -### Changed - -- Improve indexing performance #71 -- Remove original document #72 -- Rename config package to builtins #75 - - -## [v0.7.0] - 2019-07-03 - -### Added - -- Add GEO search example #65 - -### Changed - -- Migrate grpc-middleware #68 - - -## [v0.6.1] - 2019-06-21 - -### Added - -### Changed - -- Fix HTTP response into JSON format #64 -- Update Dockerfile #62 - - -## [v0.6.0] - 2019-06-19 - -### Added - -- Add federated search #30 -- Add cluster manager (#48) -- Add KVS HTTP handlers #46 - -### Changed - -- Update http logger #51 -- Update logutils (#50) -- Remve KVS (#49) - - -## [v0.5.0] - 2019-03-22 - -### Added - -- Support bulk update #41 -- Support Badger #38 -- Add index stats #37 -- Add Wikipedia example #35 -- Support cznicb and leveldb #34 -- Add logging #33 -- Add CHANGES.md #29 -- Add error handling for server startup #28. - -### Changed - -- Fixed some badger bugs #40 -- Restructure store package #36 -- Update examples #32 -- update Makefile #31 - - -## [v0.4.0] - 2019-03-14 - -### Changed - -- Code refactoring. 
+- First release @mosuka diff --git a/Dockerfile b/Dockerfile index 4da3182..f3f3173 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,4 @@ -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM golang:1.13.0-stretch +FROM golang:1.14.1-stretch ARG VERSION @@ -23,15 +9,15 @@ COPY . ${GOPATH}/src/github.com/mosuka/blast RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ echo "deb-src http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> /etc/apt/sources.list && \ apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - git \ - golang \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev \ - gcc-4.8 \ - g++-4.8 \ - build-essential && \ + git \ + golang \ + libicu-dev \ + libstemmer-dev \ + gcc-4.8 \ + g++-4.8 \ + build-essential && \ apt-get clean && \ update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 80 && \ update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 80 && \ @@ -44,30 +30,29 @@ RUN echo "deb http://ftp.us.debian.org/debian/ jessie main contrib non-free" >> ./compile_libs.sh && \ cp *.so /usr/local/lib && \ cd ${GOPATH}/src/github.com/mosuka/blast && \ - make \ - GOOS=linux \ - GOARCH=amd64 \ - CGO_ENABLED=1 \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - VERSION="${VERSION}" \ - build + make GOOS=linux \ + GOARCH=amd64 \ + CGO_ENABLED=1 \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + 
VERSION="${VERSION}" \ + build FROM debian:stretch-slim MAINTAINER Minoru Osuka "minoru.osuka@gmail.com" RUN apt-get update && \ + apt-get upgrade -y && \ apt-get install -y \ - libicu-dev \ - libstemmer-dev \ - libleveldb-dev && \ - apt-get clean + libicu-dev \ + libstemmer-dev && \ + apt-get clean && \ + rm -rf /var/cache/apk/* COPY --from=0 /go/src/github.com/blevesearch/cld2/cld2/internal/*.so /usr/local/lib/ COPY --from=0 /go/src/github.com/mosuka/blast/bin/* /usr/bin/ -COPY --from=0 /go/src/github.com/mosuka/blast/docker-entrypoint.sh /usr/bin/ -EXPOSE 2000 5000 6000 8000 +EXPOSE 7000 8000 9000 -ENTRYPOINT [ "/usr/bin/docker-entrypoint.sh" ] -CMD [ "blast", "--help" ] +ENTRYPOINT [ "/usr/bin/blast" ] +CMD [ "start" ] diff --git a/Makefile b/Makefile index ea6c4a4..a47d17d 100644 --- a/Makefile +++ b/Makefile @@ -1,33 +1,19 @@ -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - GOOS ?= GOARCH ?= GO111MODULE ?= on CGO_ENABLED ?= 0 CGO_CFLAGS ?= CGO_LDFLAGS ?= -BUILD_TAGS ?= -BIN_EXT ?= +BUILD_TAGS ?= kagome VERSION ?= +BIN_EXT ?= DOCKER_REPOSITORY ?= mosuka PACKAGES = $(shell $(GO) list ./... | grep -v '/vendor/') PROTOBUFS = $(shell find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) -TARGET_PACKAGES = $(shell find . 
-name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) +TARGET_PACKAGES = $(shell find $(CURDIR) -name 'main.go' -print0 | xargs -0 -n1 dirname | sort | uniq | grep -v /vendor/) GRPC_GATEWAY_PATH = $(shell $(GO) list -m -f "{{.Dir}}" github.com/grpc-ecosystem/grpc-gateway) @@ -42,7 +28,7 @@ endif ifeq ($(VERSION),) VERSION = latest endif -LDFLAGS = -ldflags "-s -w -X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" +LDFLAGS = -ldflags "-X \"github.com/mosuka/blast/version.Version=$(VERSION)\"" ifeq ($(GOOS),windows) BIN_EXT = .exe @@ -52,16 +38,9 @@ GO := GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO_ENABLED) CGO_CFLAGS=$(CGO_ .DEFAULT_GOAL := build -.PHONY: clean -clean: - @echo ">> cleaning binaries" - rm -rf ./bin - rm -rf ./data - rm -rf ./dist - -.PHONY: echo-env -echo-env: - @echo ">> echo environment variables" +.PHONY: show-env +show-env: + @echo ">> show env" @echo " GOOS = $(GOOS)" @echo " GOARCH = $(GOARCH)" @echo " GO111MODULE = $(GO111MODULE)" @@ -69,58 +48,68 @@ echo-env: @echo " CGO_CFLAGS = $(CGO_CFLAGS)" @echo " CGO_LDFLAGS = $(CGO_LDFLAGS)" @echo " BUILD_TAGS = $(BUILD_TAGS)" - @echo " BIN_EXT = $(BIN_EXT)" @echo " VERSION = $(VERSION)" + @echo " BIN_EXT = $(BIN_EXT)" @echo " DOCKER_REPOSITORY = $(DOCKER_REPOSITORY)" + @echo " LDFLAGS = $(LDFLAGS)" @echo " PACKAGES = $(PACKAGES)" @echo " PROTOBUFS = $(PROTOBUFS)" @echo " TARGET_PACKAGES = $(TARGET_PACKAGES)" - @echo " LDFLAGS = $(LDFLAGS)" @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" -.PHONY: format -format: - @echo ">> formatting code" - @$(GO) fmt $(PACKAGES) - .PHONY: protoc -protoc: echo-env +protoc: show-env @echo ">> generating proto3 code" - @echo " GRPC_GATEWAY_PATH = $(GRPC_GATEWAY_PATH)" - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done - @for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --proto_path=$$proto_dir --swagger_out=logtostderr=true,allow_delete_body=true:. $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. --proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --go_out=plugins=grpc:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + for proto_dir in $(PROTOBUFS); do echo $$proto_dir; protoc --proto_path=. 
--proto_path=$$proto_dir --proto_path=${GRPC_GATEWAY_PATH} --proto_path=${GRPC_GATEWAY_PATH}/third_party/googleapis --grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(GOPATH)/src $$proto_dir/*.proto || exit 1; done + +.PHONY: format +format: show-env + @echo ">> formatting code" + $(GO) fmt $(PACKAGES) .PHONY: test -test: echo-env +test: show-env @echo ">> testing all packages" - @$(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) + $(GO) test -v -tags="$(BUILD_TAGS)" $(PACKAGES) .PHONY: coverage -coverage: echo-env +coverage: show-env @echo ">> checking coverage of all packages" $(GO) test -coverprofile=./cover.out -tags="$(BUILD_TAGS)" $(PACKAGES) $(GO) tool cover -html=cover.out -o cover.html +.PHONY: clean +clean: show-env + @echo ">> cleaning binaries" + rm -rf ./bin + rm -rf ./data + rm -rf ./dist + .PHONY: build -build: echo-env +build: show-env @echo ">> building binaries" for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done .PHONY: install -install: echo-env +install: show-env @echo ">> installing binaries" for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) install -tags="$(BUILD_TAGS)" $(LDFLAGS) $$target_pkg || exit 1; done .PHONY: dist -dist: echo-env +dist: show-env @echo ">> packaging binaries" mkdir -p ./dist/$(GOOS)-$(GOARCH)/bin for target_pkg in $(TARGET_PACKAGES); do echo $$target_pkg; $(GO) build -tags="$(BUILD_TAGS)" $(LDFLAGS) -o ./dist/$(GOOS)-$(GOARCH)/bin/`basename $$target_pkg`$(BIN_EXT) $$target_pkg || exit 1; done (cd ./dist/$(GOOS)-$(GOARCH); tar zcfv ../blast-${VERSION}.$(GOOS)-$(GOARCH).tar.gz .) 
-.PHONY: git-tag -git-tag: echo-env +.PHONY: list-tag +list-tag: + @echo ">> listing github tags" + git tag -l --sort=-v:refname + +.PHONY: tag +tag: show-env @echo ">> tagging github" ifeq ($(VERSION),$(filter $(VERSION),latest master "")) @echo "please specify VERSION" @@ -130,18 +119,22 @@ else endif .PHONY: docker-build -docker-build: echo-env +docker-build: show-env @echo ">> building docker container image" docker build -t $(DOCKER_REPOSITORY)/blast:latest --build-arg VERSION=$(VERSION) . docker tag $(DOCKER_REPOSITORY)/blast:latest $(DOCKER_REPOSITORY)/blast:$(VERSION) .PHONY: docker-push -docker-push: echo-env +docker-push: show-env @echo ">> pushing docker container image" docker push $(DOCKER_REPOSITORY)/blast:latest docker push $(DOCKER_REPOSITORY)/blast:$(VERSION) -.PHONY: docker-pull -docker-pull: echo-env - @echo ">> pulling docker container image" - docker pull $(DOCKER_REPOSITORY):$(VERSION) +.PHONY: docker-clean +docker-clean: show-env + docker rmi -f $(shell docker images --filter "dangling=true" -q --no-trunc) + +.PHONY: cert +cert: show-env + @echo ">> generating certification" + openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' diff --git a/README.md b/README.md index 421b837..0d10c58 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,3 @@ - - # Blast Blast is a full-text search and indexing server written in [Go](https://golang.org) built on top of [Bleve](http://www.blevesearch.com). 
@@ -31,16 +12,14 @@ Blast makes it easy for programmers to develop search applications with advanced - Faceted search - Spatial/Geospatial search - Search result highlighting -- Distributed search/indexing - Index replication - Bringing up cluster -- Cluster Federation - An easy-to-use HTTP API - CLI is available - Docker container image is available -## Installing dependencies +## Install build dependencies Blast requires some C/C++ libraries if you need to enable cld2, icu, libstemmer or leveldb. The following sections are instructions for satisfying dependencies on particular platforms. @@ -90,63 +69,73 @@ $ sudo cp *.so /usr/local/lib ``` -## Building Blast +## Build -When you satisfied dependencies, let's build Blast for Linux as following: +Building Blast as following: ```bash $ mkdir -p ${GOPATH}/src/github.com/mosuka $ cd ${GOPATH}/src/github.com/mosuka $ git clone https://github.com/mosuka/blast.git $ cd blast -$ make build +$ make +``` + +If you omit `GOOS` or `GOARCH`, it will build the binary of the platform you are using. +If you want to specify the target platform, please set `GOOS` and `GOARCH` environment variables. + +### Linux + +```bash +$ make GOOS=linux build ``` -If you want to build for other platform, set `GOOS`, `GOARCH` environment variables. For example, build for macOS like following: +### macOS ```bash -$ make \ - GOOS=darwin \ - build +$ make GOOS=darwin build ``` -Blast supports some [Bleve Extensions (blevex)](https://github.com/blevesearch/blevex). If you want to build with them, please set `CGO_LDFLAGS`, `CGO_CFLAGS`, `CGO_ENABLED` and `BUILD_TAGS`. For example, build LevelDB to be available for index storage as follows: +### Windows ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS=leveldb \ - CGO_ENABLED=1 \ - build +$ make GOOS=windows build ``` -You can enable all the Bleve extensions supported by Blast as follows: +## Build with extensions + +Blast supports some Bleve Extensions (blevex). 
If you want to build with them, please set CGO_LDFLAGS, CGO_CFLAGS, CGO_ENABLED and BUILD_TAGS. For example, build LevelDB to be available for index storage as follows: + +```bash +$ make GOOS=linux \ + BUILD_TAGS=icu \ + CGO_ENABLED=1 \ + build +``` -### Linux +### Linux ```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - build +$ make GOOS=linux \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + build ``` ### macOS ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - build +$ make GOOS=darwin \ + BUILD_TAGS="kagome icu libstemmer cld2" \ + CGO_ENABLED=1 \ + CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ + CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ + build ``` -### Build flags +### Build flags -Please refer to the following table for details of Bleve Extensions: +Refer to the following table for the build flags of the supported Bleve extensions: | BUILD_TAGS | CGO_ENABLED | Description | | ---------- | ----------- | ----------- | @@ -154,13 +143,11 @@ Please refer to the following table for details of Bleve Extensions: | kagome | 0 | Enable Japanese Language Analyser | | icu | 1 | Enable ICU Tokenizer, Thai Language Analyser | | libstemmer | 1 | Enable Language Stemmer (Danish, German, English, Spanish, Finnish, French, Hungarian, Italian, Dutch, Norwegian, Portuguese, Romanian, Russian, Swedish, Turkish) | -| cznicb | 0 | Enable cznicb KV store | -| leveldb | 1 | Enable LevelDB | -| badger | 0 | Enable Badger (This feature is considered experimental) | -If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Installing dependencies section above. +If you want to enable the feature whose `CGO_ENABLE` is `1`, please install it referring to the Install build dependencies section above. 
-### Binaries + +## Binary You can see the binary file when build successful like so: @@ -170,718 +157,365 @@ blast ``` -## Testing Blast +## Test If you want to test your changes, run command like following: ```bash -$ make \ - test +$ make test ``` -You can test with all the Bleve extensions supported by Blast as follows: +If you want to specify the target platform, set `GOOS` and `GOARCH` environment variables in the same way as the build. -### Linux -```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - test -``` +## Package -### macOS +To create a distribution package, run the following command: ```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - test +$ make dist ``` -## Packaging Blast +## Configure -### Linux +Blast can change its startup options with configuration files, environment variables, and command line arguments. +Refer to the following table for the options that can be configured. -```bash -$ make \ - GOOS=linux \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - dist -``` +| CLI Flag | Environment variable | Configuration File | Description | +| --- | --- | --- | --- | +| --config-file | - | - | config file. 
if omitted, blast.yaml in /etc and home directory will be searched | +| --id | BLAST_ID | id | node ID | +| --raft-address | BLAST_RAFT_ADDRESS | raft_address | Raft server listen address | +| --grpc-address | BLAST_GRPC_ADDRESS | grpc_address | gRPC server listen address | +| --http-address | BLAST_HTTP_ADDRESS | http_address | HTTP server listen address | +| --data-directory | BLAST_DATA_DIRECTORY | data_directory | data directory which store the index and Raft logs | +| --mapping-file | BLAST_MAPPING_FILE | mapping_file | path to the index mapping file | +| --peer-grpc-address | BLAST_PEER_GRPC_ADDRESS | peer_grpc_address | listen address of the existing gRPC server in the joining cluster | +| --certificate-file | BLAST_CERTIFICATE_FILE | certificate_file | path to the client server TLS certificate file | +| --key-file | BLAST_KEY_FILE | key_file | path to the client server TLS key file | +| --common-name | BLAST_COMMON_NAME | common_name | certificate common name | +| --log-level | BLAST_LOG_LEVEL | log_level | log level | +| --log-file | BLAST_LOG_FILE | log_file | log file | +| --log-max-size | BLAST_LOG_MAX_SIZE | log_max_size | max size of a log file in megabytes | +| --log-max-backups | BLAST_LOG_MAX_BACKUPS | log_max_backups | max backup count of log files | +| --log-max-age | BLAST_LOG_MAX_AGE | log_max_age | max age of a log file in days | +| --log-compress | BLAST_LOG_COMPRESS | log_compress | compress a log file | -### macOS -```bash -$ make \ - GOOS=darwin \ - BUILD_TAGS="kagome icu libstemmer cld2 cznicb leveldb badger" \ - CGO_ENABLED=1 \ - CGO_LDFLAGS="-L/usr/local/opt/icu4c/lib" \ - CGO_CFLAGS="-I/usr/local/opt/icu4c/include" \ - dist -``` +## Start +Starting server is easy as follows: -## Starting Blast in standalone mode - -![standalone](https://user-images.githubusercontent.com/970948/59768879-138f5180-92e0-11e9-8b33-c7b1a93e0893.png) +```bash +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + 
--grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./examples/example_mapping.json +``` -Running a Blast in standalone mode is easy. Start a indexer like so: +You can get the node information with the following command: ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast node | jq . ``` -Please refer to following document for details of index mapping: -- http://blevesearch.com/docs/Terminology/ -- http://blevesearch.com/docs/Text-Analysis/ -- http://blevesearch.com/docs/Index-Mapping/ -- https://github.com/blevesearch/bleve/blob/master/mapping/index.go#L43 - -You can check the node with the following command: +or the following URL: ```bash -$ ./bin/blast indexer node info --grpc-address=:5000 | jq . +$ curl -X GET http://localhost:8000/v1/node | jq . ``` -You can see the result in JSON format. The result of the above command is: +The result of the above command is: ```json { "node": { - "id": "indexer1", - "bind_address": ":2000", - "state": 3, + "raft_address": ":7000", "metadata": { - "grpc_address": ":5000", - "grpc_gateway_address": ":6000", + "grpc_address": ":9000", "http_address": ":8000" - } + }, + "state": "Leader" } } ``` -You can now put, get, search and delete the documents via CLI. - -### Indexing a document via CLI +## Health check -For document indexing, execute the following command: +You can check the health status of the node. ```bash -$ ./bin/blast indexer index --grpc-address=:5000 enwiki_1 ' -{ - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" - } -} -' | jq . +$ ./bin/blast healthcheck | jq . ``` -or +Also provides the following REST APIs -```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . -``` +### Liveness probe -You can see the result in JSON format. +This endpoint always returns 200 and should be used to check server health. -```json -{} +```bash +$ curl -X GET http://localhost:8000/v1/liveness_check | jq . ``` -### Getting a document via CLI +### Readiness probe -Getting a document is as following: +This endpoint returns 200 when server is ready to serve traffic (i.e. respond to queries). ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . +$ curl -X GET http://localhost:8000/v1/readiness_check | jq . ``` -You can see the result in JSON format. +## Put a document -```json +To put a document, execute the following command: + +```bash +$ ./bin/blast set 1 ' { "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "_type": "example" } } +' | jq . ``` -### Searching documents via CLI - -Searching documents is as like following: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/wiki_search_request.json | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' --data-binary ' { - "search_result": { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } - ] - }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, - "hits": [ - { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, - { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null - }, - { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null - } - ] - }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] - } - }, - "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" - ], - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } - } - ], - "total_hits": 1, - "max_score": 0.09703538256409851, - "took": 122105, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] - }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 - } - ] - } - } + "fields": { + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "_type": "example" } } +' | jq . 
``` -Please refer to following document for details of search request and result: -- http://blevesearch.com/docs/Query/ -- http://blevesearch.com/docs/Query-String-Query/ -- http://blevesearch.com/docs/Sorting/ -- https://github.com/blevesearch/bleve/blob/master/search.go#L267 -- https://github.com/blevesearch/bleve/blob/master/search.go#L443 - -### Deleting a document via CLI - -Deleting a document is as following: +or ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 enwiki_1 -``` - -You can see the result in JSON format. The result of the above command is: - -```json -{} +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents/1' -H "Content-Type: application/json" --data-binary @./examples/example_doc_1.json ``` -### Indexing documents in bulk via CLI +## Get a document -Indexing documents in bulk, run the following command: +To get a document, execute the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file=./example/wiki_bulk_index.jsonl --bulk | jq . +$ ./bin/blast get 1 | jq . ``` -You can see the result in JSON format. The result of the above command is: - -```json -{ - "count": 36 -} -``` - -### Deleting documents in bulk via CLI - -Deleting documents in bulk, run the following command: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer delete --grpc-address=:5000 --file=./example/wiki_bulk_delete.txt | jq . +$ curl -X GET 'http://127.0.0.1:8000/v1/documents/1' | jq . ``` -You can see the result in JSON format. The result of the above command is: +You can see the result. The result of the above command is: ```json { - "count": 36 + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } } ``` +## Search documents -## Using HTTP REST API - -Also you can do above commands via HTTP REST API that listened port 5002. - -### Indexing a document via HTTP REST API - -Indexing a document via HTTP is as following: +To search documents, execute the following command: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' --data-binary ' +$ ./bin/blast search ' { - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] } } ' | jq . ``` -or +or, you can use the RESTful API as follows: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/documents' -H 'Content-Type: application/json' --data-binary @./example/wiki_doc_enwiki_1.json | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json -{} -``` - -### Getting a document via HTTP REST API - -Getting a document via HTTP is as following: - -```bash -$ curl -X GET 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . -``` - -You can see the result in JSON format. The result of the above command is: - -```json +$ curl -X POST 'http://127.0.0.1:8000/v1/search' --data-binary ' { - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "search_request": { + "query": { + "query": "+_all:search" + }, + "size": 10, + "from": 0, + "fields": [ + "*" + ], + "sort": [ + "-_score" + ] } } +' | jq . ``` -### Searching documents via HTTP REST API - -Searching documents via HTTP is as following: - -```bash -$ curl -X POST 'http://127.0.0.1:6000/v1/search' -H 'Content-Type: application/json' --data-binary @./example/wiki_search_request.json | jq . -``` - -You can see the result in JSON format. The result of the above command is: +You can see the result. 
The result of the above command is: ```json { "search_result": { - "status": { - "total": 1, - "failed": 0, - "successful": 1 - }, - "request": { - "query": { - "query": "+_all:search" - }, - "size": 10, - "from": 0, - "highlight": { - "style": "html", - "fields": [ - "title", - "text" - ] - }, - "fields": [ - "*" - ], - "facets": { - "Timestamp range": { - "size": 10, - "field": "timestamp", - "date_ranges": [ - { - "end": "2010-12-31T23:59:59Z", - "name": "2001 - 2010", - "start": "2001-01-01T00:00:00Z" - }, - { - "end": "2020-12-31T23:59:59Z", - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z" - } - ] - }, - "Type count": { - "size": 10, - "field": "_type" - } - }, - "explain": false, - "sort": [ - "-_score", - "_id", - "-timestamp" - ], - "includeLocations": false - }, + "facets": null, "hits": [ { - "index": "/tmp/blast/indexer1/index", - "id": "enwiki_1", - "score": 0.09703538256409851, - "locations": { - "text_en": { - "search": [ - { - "pos": 2, - "start": 2, - "end": 8, - "array_positions": null - }, - { - "pos": 20, - "start": 118, - "end": 124, - "array_positions": null - }, - { - "pos": 33, - "start": 195, - "end": 201, - "array_positions": null - }, - { - "pos": 68, - "start": 415, - "end": 421, - "array_positions": null - }, - { - "pos": 73, - "start": 438, - "end": 444, - "array_positions": null - }, - { - "pos": 76, - "start": 458, - "end": 466, - "array_positions": null - } - ] - }, - "title_en": { - "search": [ - { - "pos": 1, - "start": 0, - "end": 6, - "array_positions": null - } - ] - } - }, - "sort": [ - "_score", - "enwiki_1", - " \u0001\u0015\u001f\u0004~80Pp\u0000" - ], "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" - } + "title": "Search engine (computing)" + }, + "id": "1", + "index": "/tmp/blast/node1/index", + "score": 0.09703538256409851, + "sort": [ + "_score" + ] } ], - "total_hits": 1, "max_score": 0.09703538256409851, - "took": 323568, - "facets": { - "Timestamp range": { - "field": "timestamp", - "total": 1, - "missing": 0, - "other": 0, - "date_ranges": [ - { - "name": "2011 - 2020", - "start": "2011-01-01T00:00:00Z", - "end": "2020-12-31T23:59:59Z", - "count": 1 - } - ] + "request": { + "explain": false, + "facets": null, + "fields": [ + "*" + ], + "from": 0, + "highlight": null, + "includeLocations": false, + "query": { + "query": "+_all:search" }, - "Type count": { - "field": "_type", - "total": 1, - "missing": 0, - "other": 0, - "terms": [ - { - "term": "enwiki", - "count": 1 - } - ] - } - } + "search_after": null, + "search_before": null, + "size": 10, + "sort": [ + "-_score" + ] + }, + "status": { + "failed": 0, + "successful": 1, + "total": 1 + }, + "took": 171880, + "total_hits": 1 } } ``` -### Deleting a document via HTTP REST API +## Delete 
a document -Deleting a document via HTTP is as following: +Deleting a document, execute the following command: ```bash -$ curl -X DELETE 'http://127.0.0.1:6000/v1/documents/enwiki_1' -H 'Content-Type: application/json' | jq . +$ ./bin/blast delete 1 ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json -{} +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents/1' ``` -### Indexing documents in bulk via HTTP REST API +## Index documents in bulk -Indexing documents in bulk via HTTP is as following: +To index documents in bulk, execute the following command: ```bash -$ curl -X PUT 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: application/x-ndjson' --data-binary @./example/wiki_bulk_index.jsonl | jq . +$ ./bin/blast bulk-index --file ./examples/example_bulk_index.json ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json -{ - "count": 36 -} +```bash +$ curl -X PUT 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: application/x-ndjson" --data-binary @./examples/example_bulk_index.json ``` -### Deleting documents in bulk via HTTP REST API +## Delete documents in bulk -Deleting documents in bulk via HTTP is as following: +To delete documents in bulk, execute the following command: ```bash -$ curl -X DELETE 'http://127.0.0.1:6000/v1/bulk' -H 'Content-Type: text/plain' --data-binary @./example/wiki_bulk_delete.txt | jq . +$ ./bin/blast bulk-delete --file ./examples/example_bulk_delete.txt ``` -You can see the result in JSON format. 
The result of the above command is: +or, you can use the RESTful API as follows: -```json -{ - "count": 36 -} +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/documents' -H "Content-Type: text/plain" --data-binary @./examples/example_bulk_delete.txt ``` +## Bringing up a cluster -## Starting Blast in cluster mode - -![cluster](https://user-images.githubusercontent.com/970948/59768677-bf846d00-92df-11e9-8a70-92496ff55ce7.png) - -Blast can easily bring up a cluster. Running a Blast in standalone is not fault tolerant. If you need to improve fault tolerance, start two more indexers as follows: - -First of all, start a indexer in standalone. +Blast is easy to bring up the cluster. the node is already running, but that is not fault tolerant. If you need to increase the fault tolerance, bring up 2 more data nodes like so: ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./examples/example_mapping.json ``` -Then, start two more indexers. 
- ```bash -$ ./bin/blast indexer start \ - --peer-grpc-address=:5000 \ - --grpc-address=:5010 \ - --grpc-gateway-address=:6010 \ - --http-address=:8010 \ - --node-id=indexer2 \ - --node-address=:2010 \ - --data-dir=/tmp/blast/indexer2 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --peer-grpc-address=:5000 \ - --grpc-address=:5020 \ - --grpc-gateway-address=:6020 \ - --http-address=:8020 \ - --node-id=indexer3 \ - --node-address=:2020 \ - --data-dir=/tmp/blast/indexer3 \ - --raft-storage-type=boltdb +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./examples/example_mapping.json ``` + _Above example shows each Blast node running on the same host, so each node must listen on different ports. This would not be necessary if each node ran on a different host._ -This instructs each new node to join an existing node, specifying `--peer-addr=:5001`. Each node recognizes the joining clusters when started. -So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the peers in the cluster with the following command: +This instructs each new node to join an existing node, each node recognizes the joining clusters when started. +So you have a 3-node cluster. That way you can tolerate the failure of 1 node. You can check the cluster with the following command: ```bash -$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . +$ ./bin/blast cluster | jq . ``` -or +or, you can use the RESTful API as follows: ```bash -$ curl -X GET 'http://127.0.0.1:6000/v1/cluster/status' -H 'Content-Type: application/json' | jq . +$ curl -X GET 'http://127.0.0.1:8000/v1/cluster' | jq . ``` You can see the result in JSON format. The result of the above command is: @@ -890,249 +524,121 @@ You can see the result in JSON format. 
The result of the above command is: { "cluster": { "nodes": { - "indexer1": { - "id": "indexer1", - "bind_address": ":2000", - "state": 1, + "node1": { + "raft_address": ":7000", "metadata": { - "grpc_address": ":5000", - "grpc_gateway_address": ":6000", + "grpc_address": ":9000", "http_address": ":8000" - } + }, + "state": "Leader" }, - "indexer2": { - "id": "indexer2", - "bind_address": ":2010", - "state": 1, + "node2": { + "raft_address": ":7001", "metadata": { - "grpc_address": ":5010", - "grpc_gateway_address": ":6010", - "http_address": ":8010" - } + "grpc_address": ":9001", + "http_address": ":8001" + }, + "state": "Follower" }, - "indexer3": { - "id": "indexer3", - "bind_address": ":2020", - "state": 3, + "node3": { + "raft_address": ":7002", "metadata": { - "grpc_address": ":5020", - "grpc_gateway_address": ":6020", - "http_address": ":8020" - } + "grpc_address": ":9002", + "http_address": ":8002" + }, + "state": "Follower" } - } + }, + "leader": "node1" } } ``` Recommend 3 or more odd number of nodes in the cluster. In failure scenarios, data loss is inevitable, so avoid deploying single nodes. -The following command indexes documents to any node in the cluster: +The above example, the node joins to the cluster at startup, but you can also join the node that already started on standalone mode to the cluster later, as follows: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/wiki_doc_enwiki_1.json | jq . +$ ./bin/blast join --grpc-address=:9000 node2 127.0.0.1:9001 ``` -So, you can get the document from the node specified by the above command as follows: +or, you can use the RESTful API as follows: ```bash -$ ./bin/blast indexer get --grpc-address=:5000 enwiki_1 | jq . -``` - -You can see the result in JSON format. 
The result of the above command is: - -```json +$ curl -X PUT 'http://127.0.0.1:8000/v1/cluster/node2' --data-binary ' { - "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "raft_address": ":7001", + "metadata": { + "grpc_address": ":9001", + "http_address": ":8001" } } +' ``` -You can also get the same document from other nodes in the cluster as follows: +To remove a node from the cluster, execute the following command: ```bash -$ ./bin/blast indexer get --grpc-address=:5010 enwiki_1 | jq . -$ ./bin/blast indexer get --grpc-address=:5020 enwiki_1 | jq . +$ ./bin/blast leave --grpc-address=:9000 node2 ``` -You can see the result in JSON format. The result of the above command is: +or, you can use the RESTful API as follows: -```json +```bash +$ curl -X DELETE 'http://127.0.0.1:8000/v1/cluster/node2' +``` + +The following command indexes documents to any node in the cluster: + +```bash +$ ./bin/blast set 1 ' { "fields": { - "_type": "enwiki", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", "timestamp": "2018-07-04T05:41:00Z", - "title_en": "Search engine (computing)" + "_type": "example" } } +' --grpc-address=:9000 | jq . ``` - -## Starting Blast in federated mode (experimental) - -![federation](https://user-images.githubusercontent.com/970948/59768498-6f0d0f80-92df-11e9-8538-2a1c6e44c30a.png) - -Running a Blast in cluster mode allows you to replicate the index among indexers in a cluster to improve fault tolerance. -However, as the index grows, performance degradation can become an issue. Therefore, instead of providing a large single physical index, it is better to distribute indices across multiple indexers. -Blast provides a federated mode to enable distributed search and indexing. - -Blast provides the following type of node for federation: -- manager: Manager manage common index mappings to index across multiple indexers. It also manages information and status of clusters that participate in the federation. -- dispatcher: Dispatcher is responsible for distributed search or indexing of each indexer. In the case of a index request, send document to each cluster based on the document ID. And in the case of a search request, the same query is sent to each cluster, then the search results are merged and returned to the client. 
- -### Bring up the manager cluster - -Manager can also bring up a cluster like an indexer. Specify a common index mapping for federation at startup. - -```bash -$ ./bin/blast manager start \ - --grpc-address=:5100 \ - --grpc-gateway-address=:6100 \ - --http-address=:8100 \ - --node-id=manager1 \ - --node-address=:2100 \ - --data-dir=/tmp/blast/manager1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb - -$ ./bin/blast manager start \ - --peer-grpc-address=:5100 \ - --grpc-address=:5110 \ - --grpc-gateway-address=:6110 \ - --http-address=:8110 \ - --node-id=manager2 \ - --node-address=:2110 \ - --data-dir=/tmp/blast/manager2 \ - --raft-storage-type=boltdb - -$ ./bin/blast manager start \ - --peer-grpc-address=:5100 \ - --grpc-address=:5120 \ - --grpc-gateway-address=:6120 \ - --http-address=:8120 \ - --node-id=manager3 \ - --node-address=:2120 \ - --data-dir=/tmp/blast/manager3 \ - --raft-storage-type=boltdb -``` - -### Bring up the indexer cluster - -Federated mode differs from cluster mode that it specifies the manager in start up to bring up indexer cluster. -The following example starts two 3-node clusters. 
+So, you can get the document from the node specified by the above command as follows: ```bash -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5010 \ - --grpc-gateway-address=:6010 \ - --http-address=:8010 \ - --node-id=indexer2 \ - --node-address=:2010 \ - --data-dir=/tmp/blast/indexer2 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard1 \ - --grpc-address=:5020 \ - --grpc-gateway-address=:6020 \ - --http-address=:8020 \ - --node-id=indexer3 \ - --node-address=:2020 \ - --data-dir=/tmp/blast/indexer3 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5030 \ - --grpc-gateway-address=:6030 \ - --http-address=:8030 \ - --node-id=indexer4 \ - --node-address=:2030 \ - --data-dir=/tmp/blast/indexer4 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5040 \ - --grpc-gateway-address=:6040 \ - --http-address=:8040 \ - --node-id=indexer5 \ - --node-address=:2040 \ - --data-dir=/tmp/blast/indexer5 \ - --raft-storage-type=boltdb - -$ ./bin/blast indexer start \ - --manager-grpc-address=:5100 \ - --shard-id=shard2 \ - --grpc-address=:5050 \ - --grpc-gateway-address=:6050 \ - --http-address=:8050 \ - --node-id=indexer6 \ - --node-address=:2050 \ - --data-dir=/tmp/blast/indexer6 \ - --raft-storage-type=boltdb +$ ./bin/blast get 1 --grpc-address=:9000 | jq . 
``` -### Start up the dispatcher - -Finally, start the dispatcher with a manager that manages the target federation so that it can perform distributed search and indexing. +You can see the result. The result of the above command is: -```bash -$ ./bin/blast dispatcher start \ - --manager-grpc-address=:5100 \ - --grpc-address=:5200 \ - --grpc-gateway-address=:6200 \ - --http-address=:8200 +```text +value1 ``` -### Check the cluster info - -```bash -$ ./bin/blast manager cluster info --grpc-address=:5100 | jq . -$ ./bin/blast indexer cluster info --grpc-address=:5000 | jq . -$ ./bin/blast indexer cluster info --grpc-address=:5030 | jq . -$ ./bin/blast manager get cluster --grpc-address=:5100 --format=json | jq . -``` +You can also get the same document from other nodes in the cluster as follows: ```bash -$ ./bin/blast dispatcher index --grpc-address=:5200 --file=./example/wiki_bulk_index.jsonl --bulk | jq . +$ ./bin/blast get 1 --grpc-address=:9001 | jq . +$ ./bin/blast get 1 --grpc-address=:9002 | jq . ``` -```bash -$ ./bin/blast dispatcher search --grpc-address=:5200 --file=./example/wiki_search_request_simple.json | jq . -``` +You can see the result. The result of the above command is: -```bash -$ ./bin/blast dispatcher delete --grpc-address=:5200 --file=./example/wiki_bulk_delete.txt | jq . +```json +{ + "fields": { + "_type": "example", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": "2018-07-04T05:41:00Z", + "title": "Search engine (computing)" + } +} ``` -## Blast on Docker +## Docker -### Building Docker container image on localhost +### Build Docker container image You can build the Docker container image like so: @@ -1140,7 +646,7 @@ You can build the Docker container image like so: $ make docker-build ``` -### Pulling Docker container image from docker.io +### Pull Docker container image from docker.io You can also use the Docker container image already registered in docker.io like so: @@ -1150,154 +656,101 @@ $ docker pull mosuka/blast:latest See https://hub.docker.com/r/mosuka/blast/tags/ -### Pulling Docker container image from docker.io - -You can also use the Docker container image already registered in docker.io like so: - -```bash -$ docker pull mosuka/blast:latest -``` - -### Running Indexer on Docker +### Start on Docker -Running a Blast data node on Docker. Start Blast data node like so: +Running a Blast data node on Docker. 
Start Blast node like so: ```bash -$ docker run --rm --name blast-indexer1 \ - -p 2000:2000 \ - -p 5000:5000 \ - -p 6000:6000 \ +$ docker run --rm --name blast-node1 \ + -p 7000:7000 \ -p 8000:8000 \ - -v $(pwd)/example:/opt/blast/example \ - mosuka/blast:latest blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ + -p 9000:9000 \ + -v $(pwd)/etc/blast_mapping.json:/etc/blast_mapping.json \ + mosuka/blast:latest start \ + --id=node1 \ + --raft-address=:7000 \ --http-address=:8000 \ - --node-id=blast-indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=/opt/blast/example/wiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb + --grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=/etc/blast_mapping.json ``` You can execute the command in docker container as follows: ```bash -$ docker exec -it blast-indexer1 blast indexer node info --grpc-address=:5000 +$ docker exec -it blast-node1 blast node --grpc-address=:9000 ``` -### Running cluster on Docker compose +## Securing Blast -Also, running a Blast cluster on Docker compose. +Blast supports HTTPS access, ensuring that all communication between clients and a cluster is encrypted. -```bash -$ docker-compose up -d manager1 -$ docker-compose up -d indexer1 -$ docker-compose up -d indexer2 -$ docker-compose up -d indexer3 -$ docker-compose up -d indexer4 -$ docker-compose up -d indexer5 -$ docker-compose up -d indexer6 -$ docker-compose up -d dispatcher1 -$ docker-compose ps -$ ./bin/blast manager get --grpc-address=127.0.0.1:5110 /cluster | jq . -$ ./bin/blast dispatcher index --grpc-address=127.0.0.1:5210 --file=./example/wiki_bulk_index.jsonl --bulk | jq . -$ ./bin/blast dispatcher search --grpc-address=127.0.0.1:5210 --file=./example/wiki_search_request_simple.json | jq . 
-``` +### Generating a certificate and private key -```bash -$ docker-compose down -``` - - -## Wikipedia example - -This section explain how to index Wikipedia dump to Blast. - -### Install wikiextractor +One way to generate the necessary resources is via [openssl](https://www.openssl.org/). For example: ```bash -$ cd ${HOME} -$ git clone git@github.com:attardi/wikiextractor.git +$ openssl req -x509 -nodes -newkey rsa:4096 -keyout ./etc/blast_key.pem -out ./etc/blast_cert.pem -days 365 -subj '/CN=localhost' +Generating a 4096 bit RSA private key +............................++ +........++ +writing new private key to 'key.pem' ``` -### Download wikipedia dump +### Secure cluster example -```bash -$ curl -o ~/tmp/enwiki-20190101-pages-articles.xml.bz2 https://dumps.wikimedia.org/enwiki/20190101/enwiki-20190101-pages-articles.xml.bz2 -``` - -### Parsing wikipedia dump +Starting a node with HTTPS enabled, node-to-node encryption, and with the above configuration file. It is assumed the HTTPS X.509 certificate and key are at the paths server.crt and key.pem respectively. 
```bash -$ cd wikiextractor -$ ./WikiExtractor.py -o ~/tmp/enwiki --json ~/tmp/enwiki-20190101-pages-articles.xml.bz2 +$ ./bin/blast start \ + --id=node1 \ + --raft-address=:7000 \ + --http-address=:8000 \ + --grpc-address=:9000 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node1 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` -### Starting Indexer - ```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --grpc-gateway-address=:6000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/enwiki_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb +$ ./bin/blast start \ + --id=node2 \ + --raft-address=:7001 \ + --http-address=:8001 \ + --grpc-address=:9001 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node2 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` -### Indexing wikipedia dump - ```bash -$ for FILE in $(find ~/tmp/enwiki -type f -name '*' | sort) - do - echo "Indexing ${FILE}" - TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%SZ") - DOCS=$(cat ${FILE} | jq -r '. 
+ {fields: {url: .url, title_en: .title, text_en: .text, timestamp: "'${TIMESTAMP}'", _type: "enwiki"}} | del(.url) | del(.title) | del(.text) | del(.fields.id)' | jq -c) - curl -s -X PUT -H 'Content-Type: application/x-ndjson' "http://127.0.0.1:6000/v1/bulk" --data-binary "${DOCS}" - echo "" - done +$ ./bin/blast start \ + --id=node3 \ + --raft-address=:7002 \ + --http-address=:8002 \ + --grpc-address=:9002 \ + --peer-grpc-address=:9000 \ + --data-directory=/tmp/blast/node3 \ + --mapping-file=./etc/blast_mapping.json \ + --certificate-file=./etc/blast_cert.pem \ + --key-file=./etc/blast_key.pem \ + --common-name=localhost ``` - -## Spatial/Geospatial search example - -This section explain how to index Spatial/Geospatial data to Blast. - -### Starting Indexer with Spatial/Geospatial index mapping - -```bash -$ ./bin/blast indexer start \ - --grpc-address=:5000 \ - --http-address=:8000 \ - --node-id=indexer1 \ - --node-address=:2000 \ - --data-dir=/tmp/blast/indexer1 \ - --raft-storage-type=boltdb \ - --index-mapping-file=./example/geo_index_mapping.json \ - --index-type=upside_down \ - --index-storage-type=boltdb -``` - -### Indexing example Spatial/Geospatial data +You can access the cluster by adding a flag, such as the following command: ```bash -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_1.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_2.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_3.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_4.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_5.json -$ ./bin/blast indexer index --grpc-address=:5000 --file ./example/geo_doc_6.json +$ ./bin/blast cluster --grpc-address=:9000 --certificate-file=./etc/blast_cert.pem --common-name=localhost | jq . 
``` -### Searching example Spatial/Geospatial data +or ```bash -$ ./bin/blast indexer search --grpc-address=:5000 --file=./example/geo_search_request.json +$ curl -X GET https://localhost:8000/v1/cluster --cacert ./etc/cert.pem | jq . ``` diff --git a/builtin/config_bleve.go b/builtin/config_bleve.go new file mode 100644 index 0000000..d95e507 --- /dev/null +++ b/builtin/config_bleve.go @@ -0,0 +1,5 @@ +package builtin + +import ( + _ "github.com/blevesearch/bleve/config" +) diff --git a/builtins/config_badger.go b/builtins/config_badger.go deleted file mode 100644 index b920c65..0000000 --- a/builtins/config_badger.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build badger full - -package builtins - -import ( - _ "github.com/mosuka/bbadger" -) diff --git a/builtins/config_bleve.go b/builtins/config_bleve.go deleted file mode 100644 index 031bf9a..0000000 --- a/builtins/config_bleve.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package builtins - -import ( - _ "github.com/blevesearch/bleve/config" -) diff --git a/client/grpc_client.go b/client/grpc_client.go new file mode 100644 index 0000000..c00fd97 --- /dev/null +++ b/client/grpc_client.go @@ -0,0 +1,218 @@ +package client + +import ( + "context" + "log" + "math" + "time" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +type GRPCClient struct { + ctx context.Context + cancel context.CancelFunc + conn *grpc.ClientConn + client protobuf.IndexClient + + logger *log.Logger +} + +func NewGRPCClient(grpc_address string) (*GRPCClient, error) { + return NewGRPCClientWithContext(grpc_address, context.Background()) +} + +func NewGRPCClientWithContext(grpc_address string, baseCtx context.Context) (*GRPCClient, error) { + return NewGRPCClientWithContextTLS(grpc_address, baseCtx, "", "") +} + +func NewGRPCClientWithContextTLS(grpcAddress string, baseCtx context.Context, certificateFile string, commonName string) (*GRPCClient, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + ctx, cancel := context.WithCancel(baseCtx) + + if 
certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + conn, err := grpc.DialContext(ctx, grpcAddress, dialOpts...) + if err != nil { + cancel() + return nil, err + } + + return &GRPCClient{ + ctx: ctx, + cancel: cancel, + conn: conn, + client: protobuf.NewIndexClient(conn), + }, nil +} + +func (c *GRPCClient) Close() error { + c.cancel() + if c.conn != nil { + return c.conn.Close() + } + + return c.ctx.Err() +} + +func (c *GRPCClient) Target() string { + return c.conn.Target() +} + +func (c *GRPCClient) LivenessCheck(opts ...grpc.CallOption) (*protobuf.LivenessCheckResponse, error) { + if resp, err := c.client.LivenessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) ReadinessCheck(opts ...grpc.CallOption) (*protobuf.ReadinessCheckResponse, error) { + if resp, err := c.client.ReadinessCheck(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Join(req *protobuf.JoinRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Join(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Leave(req *protobuf.LeaveRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Leave(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Node(opts ...grpc.CallOption) (*protobuf.NodeResponse, error) { + if resp, err := c.client.Node(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Cluster(opts ...grpc.CallOption) (*protobuf.ClusterResponse, error) { + if resp, err := c.client.Cluster(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, 
err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Snapshot(opts ...grpc.CallOption) error { + if _, err := c.client.Snapshot(c.ctx, &empty.Empty{}); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Get(req *protobuf.GetRequest, opts ...grpc.CallOption) (*protobuf.GetResponse, error) { + if resp, err := c.client.Get(c.ctx, req, opts...); err != nil { + st, _ := status.FromError(err) + switch st.Code() { + case codes.NotFound: + return nil, errors.ErrNotFound + default: + return nil, err + } + } else { + return resp, nil + } +} + +func (c *GRPCClient) Search(req *protobuf.SearchRequest, opts ...grpc.CallOption) (*protobuf.SearchResponse, error) { + if resp, err := c.client.Search(c.ctx, req, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Set(req *protobuf.SetRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Set(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) Delete(req *protobuf.DeleteRequest, opts ...grpc.CallOption) error { + if _, err := c.client.Delete(c.ctx, req, opts...); err != nil { + return err + } + + return nil +} + +func (c *GRPCClient) BulkIndex(req *protobuf.BulkIndexRequest, opts ...grpc.CallOption) (*protobuf.BulkIndexResponse, error) { + if resp, err := c.client.BulkIndex(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) BulkDelete(req *protobuf.BulkDeleteRequest, opts ...grpc.CallOption) (*protobuf.BulkDeleteResponse, error) { + if resp, err := c.client.BulkDelete(c.ctx, req, opts...); err == nil { + return resp, nil + } else { + return nil, err + } +} + +func (c *GRPCClient) Mapping(opts ...grpc.CallOption) (*protobuf.MappingResponse, error) { + if resp, err := c.client.Mapping(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} + +func (c *GRPCClient) Watch(req *empty.Empty, opts 
...grpc.CallOption) (protobuf.Index_WatchClient, error) { + return c.client.Watch(c.ctx, req, opts...) +} + +func (c *GRPCClient) Metrics(opts ...grpc.CallOption) (*protobuf.MetricsResponse, error) { + if resp, err := c.client.Metrics(c.ctx, &empty.Empty{}, opts...); err != nil { + return nil, err + } else { + return resp, nil + } +} diff --git a/cmd/blast/dispatcher_delete.go b/cmd/blast/dispatcher_delete.go deleted file mode 100644 index 255e350..0000000 --- a/cmd/blast/dispatcher_delete.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - id := c.Args().Get(0) - - // create client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := dispatcher.JsonMarshaler{} - - if id != "" { - req := &distribute.DeleteRequest{ - Id: id, - } - resp, err := client.Delete(req) - if err != nil { - return err - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } else { - if filePath != "" { - ids := make([]string, 0) - - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - reader := bufio.NewReader(file) - for { - docIdBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - break - } - - return err - } - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - } - - req := &distribute.BulkDeleteRequest{ - Ids: ids, - } - - resp, err := client.BulkDelete(req) - if err != nil { - return err - } - - resultBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/dispatcher_get.go 
b/cmd/blast/dispatcher_get.go deleted file mode 100644 index cc01500..0000000 --- a/cmd/blast/dispatcher_get.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - id := c.Args().Get(0) - if id == "" { - err := errors.New("arguments are not correct") - return err - } - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &distribute.GetRequest{ - Id: id, - } - - res, err := client.Get(req) - if err != nil { - return err - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_index.go b/cmd/blast/dispatcher_index.go deleted file mode 100644 index 59dd811..0000000 --- a/cmd/blast/dispatcher_index.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func dispatcherIndex(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - bulk := c.Bool("bulk") - - // create gRPC client - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := dispatcher.JsonMarshaler{} - - if c.NArg() >= 2 { - // index document by specifying ID and fields via standard input - id := c.Args().Get(0) - fieldsSrc := c.Args().Get(1) - - var fieldsMap map[string]interface{} - err := json.Unmarshal([]byte(fieldsSrc), &fieldsMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: id, - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else if c.NArg() == 1 { - // index document by specifying document(s) via standard input - docSrc := c.Args().Get(0) - - if 
bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &distribute.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - // index document by specifying document(s) via file - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - if bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(file) - for { - docBytes, err := reader.ReadBytes('\n') - if 
err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &distribute.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - docBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - var docMap map[string]interface{} - err = json.Unmarshal(docBytes, &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &distribute.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go deleted file mode 100644 index 6594ffe..0000000 --- a/cmd/blast/dispatcher_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherNodeHealth(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *distribute.NodeHealthCheckResponse - if healthiness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &distribute.NodeHealthCheckRequest{Probe: distribute.NodeHealthCheckRequest_HEALTHINESS} - res, err = 
client.NodeHealthCheck(req) - if err != nil { - res = &distribute.NodeHealthCheckResponse{State: distribute.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_search.go b/cmd/blast/dispatcher_search.go deleted file mode 100644 index bf6ccda..0000000 --- a/cmd/blast/dispatcher_search.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/urfave/cli" -) - -func dispatcherSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - - searchRequest := bleve.NewSearchRequest(nil) - - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // open file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - // read file - fileBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - - // create search request - if fileBytes != nil { - var tmpValue map[string]interface{} - err = json.Unmarshal(fileBytes, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - } - } - - client, err := dispatcher.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - searchRequestAny := &any.Any{} - err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return err - } - - req := &distribute.SearchRequest{SearchRequest: searchRequestAny} - - res, err := client.Search(req) - if err != nil { - return err - } - - marshaler := dispatcher.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", 
string(resBytes))) - - return nil -} diff --git a/cmd/blast/dispatcher_start.go b/cmd/blast/dispatcher_start.go deleted file mode 100644 index 4b61df3..0000000 --- a/cmd/blast/dispatcher_start.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/mosuka/blast/dispatcher" - "github.com/mosuka/blast/logutils" - "github.com/urfave/cli" -) - -func dispatcherStart(c *cli.Context) error { - managerAddr := c.String("manager-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - logLevel := c.GlobalString("log-level") - logFilename := c.GlobalString("log-file") - logMaxSize := c.GlobalInt("log-max-size") - logMaxBackups := c.GlobalInt("log-max-backups") - logMaxAge := c.GlobalInt("log-max-age") - logCompress := c.GlobalBool("log-compress") - - grpcLogLevel := c.GlobalString("grpc-log-level") - grpcLogFilename := c.GlobalString("grpc-log-file") - grpcLogMaxSize := c.GlobalInt("grpc-log-max-size") - grpcLogMaxBackups := c.GlobalInt("grpc-log-max-backups") - grpcLogMaxAge := c.GlobalInt("grpc-log-max-age") - grpcLogCompress := c.GlobalBool("grpc-log-compress") - - httpLogFilename := c.GlobalString("http-log-file") - httpLogMaxSize := c.GlobalInt("http-log-max-size") - httpLogMaxBackups := c.GlobalInt("http-log-max-backups") - 
httpLogMaxAge := c.GlobalInt("http-log-max-age") - httpLogCompress := c.GlobalBool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpLogFilename, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - svr, err := dispatcher.NewServer(managerAddr, grpcAddr, grpcGatewayAddr, httpAddr, logger, grpcLogger, httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go deleted file mode 100644 index 7963655..0000000 --- a/cmd/blast/indexer_cluster_info.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerClusterInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - resp, err := client.ClusterInfo(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go deleted file mode 100644 index 0793229..0000000 --- a/cmd/blast/indexer_cluster_leave.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerClusterLeave(c *cli.Context) error { - clusterGrpcAddr := c.String("manager-grpc-address") - shardId := c.String("shard-id") - peerGrpcAddr := c.String("peer-grpc-address") - - if clusterGrpcAddr != "" && shardId != "" { - // get grpc address of leader node - } else if peerGrpcAddr != "" { - // get grpc address of leader node - } - - nodeId := c.String("node-id") - - client, err := indexer.NewGRPCClient(peerGrpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &index.ClusterLeaveRequest{ - Id: nodeId, - } - - resp, err := client.ClusterLeave(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go deleted file mode 100644 index a991b34..0000000 --- a/cmd/blast/indexer_cluster_watch.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerClusterWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - req := &empty.Empty{} - clusterInfo, err := client.ClusterInfo(req) - if err != nil { - return err - } - resp := &index.ClusterWatchResponse{ - Event: 0, - Node: nil, - Cluster: clusterInfo.Cluster, - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - clusterWatchClient, err := client.ClusterWatch(req) - if err != nil { - return err - } - - for { - resp, err := clusterWatchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - respBytes, err = marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } - - return nil -} diff --git a/cmd/blast/indexer_delete.go b/cmd/blast/indexer_delete.go deleted file mode 100644 index b8aa834..0000000 --- a/cmd/blast/indexer_delete.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - id := c.Args().Get(0) - - // create client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - if id != "" { - req := &index.DeleteRequest{ - Id: id, - } - resp, err := client.Delete(req) - if err != nil { - return err - } - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } else { - if filePath != "" { - ids := make([]string, 0) - - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - reader := bufio.NewReader(file) - for { - docIdBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - break - } - - return err - } - docId := string(docIdBytes) - if docId != "" { - ids = append(ids, docId) - } - } - - req := &index.BulkDeleteRequest{ - Ids: ids, - } - - resp, err := client.BulkDelete(req) - if err != nil { - return err - } - - resultBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resultBytes))) - } else { - return errors.New("argument error") - } - 
} - - return nil -} diff --git a/cmd/blast/indexer_get.go b/cmd/blast/indexer_get.go deleted file mode 100644 index 976e4be..0000000 --- a/cmd/blast/indexer_get.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - id := c.Args().Get(0) - if id == "" { - err := errors.New("arguments are not correct") - return err - } - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &index.GetRequest{ - Id: id, - } - - resp, err := client.Get(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - respBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - - return nil -} diff --git a/cmd/blast/indexer_index.go b/cmd/blast/indexer_index.go deleted file mode 100644 index 7f5521c..0000000 --- a/cmd/blast/indexer_index.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerIndex(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - bulk := c.Bool("bulk") - - // create gRPC client - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := indexer.JsonMarshaler{} - - if c.NArg() >= 2 { - // index document by specifying ID and fields via standard input - id := c.Args().Get(0) - docSrc := c.Args().Get(1) - - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"], fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: id, - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else if c.NArg() == 1 { - // index document by specifying document(s) via standard input - docSrc := c.Args().Get(0) - - if bulk { - // jsonl - docs := 
make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader([]byte(docSrc))) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &index.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - var docMap map[string]interface{} - err := json.Unmarshal([]byte(docSrc), &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - // index document by specifying document(s) via file - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // read index mapping file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - if bulk { - // jsonl - docs := make([]*index.Document, 0) - reader := bufio.NewReader(file) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err 
== io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - - req := &index.BulkIndexRequest{ - Documents: docs, - } - res, err := client.BulkIndex(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } else { - // json - docBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - var docMap map[string]interface{} - err = json.Unmarshal(docBytes, &docMap) - if err != nil { - return err - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(docMap["fields"].(map[string]interface{}), fieldsAny) - if err != nil { - return err - } - - req := &index.IndexRequest{ - Id: docMap["id"].(string), - Fields: fieldsAny, - } - - res, err := client.Index(req) - if err != nil { - return err - } - - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - } else { - return errors.New("argument error") - } - } - - return nil -} diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go deleted file mode 100644 index e818992..0000000 --- a/cmd/blast/indexer_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerNodeHealth(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *index.NodeHealthCheckResponse - if healthiness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: index.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &index.NodeHealthCheckResponse{State: 
index.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go deleted file mode 100644 index 610403f..0000000 --- a/cmd/blast/indexer_node_info.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerNodeInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - res, err := client.NodeInfo(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - - nodeBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) - - return nil -} diff --git a/cmd/blast/indexer_search.go b/cmd/blast/indexer_search.go deleted file mode 100644 index 2a7d4b0..0000000 --- a/cmd/blast/indexer_search.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerSearch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - filePath := c.String("file") - - searchRequest := bleve.NewSearchRequest(nil) - - if filePath != "" { - _, err := os.Stat(filePath) - if err != nil { - if os.IsNotExist(err) { - // does not exist - return err - } - // other error - return err - } - - // open file - file, err := os.Open(filePath) - if err != nil { - return err - } - defer func() { - _ = file.Close() - }() - - // read file - fileBytes, err := ioutil.ReadAll(file) - if err != nil { - return err - } - - // create search request - if fileBytes != nil { - var tmpValue map[string]interface{} - err = json.Unmarshal(fileBytes, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("search_request does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - } - } - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - searchRequestAny := &any.Any{} - err = protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - return err - } - - req := &index.SearchRequest{SearchRequest: searchRequestAny} - - res, err := client.Search(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", 
string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_snapshot.go b/cmd/blast/indexer_snapshot.go deleted file mode 100644 index bad2cf5..0000000 --- a/cmd/blast/indexer_snapshot.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/urfave/cli" -) - -func indexerSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - - res, err := client.Snapshot(req) - if err != nil { - return err - } - - marshaler := indexer.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go deleted file mode 100644 index d01b076..0000000 --- a/cmd/blast/indexer_start.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf/index" - "github.com/urfave/cli" -) - -func indexerStart(c *cli.Context) error { - managerGRPCAddr := c.String("manager-grpc-address") - shardId := c.String("shard-id") - peerGRPCAddr := c.String("peer-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - nodeId := c.String("node-id") - nodeAddr := c.String("node-address") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - logLevel := c.String("log-level") - logFilename := c.String("log-file") - logMaxSize := c.Int("log-max-size") - logMaxBackups := c.Int("log-max-backups") - logMaxAge := c.Int("log-max-age") - logCompress := c.Bool("log-compress") - - grpcLogLevel := c.String("grpc-log-level") - grpcLogFilename := c.String("grpc-log-file") - grpcLogMaxSize := c.Int("grpc-log-max-size") - grpcLogMaxBackups := c.Int("grpc-log-max-backups") - grpcLogMaxAge := c.Int("grpc-log-max-age") - grpcLogCompress := c.Bool("grpc-log-compress") - - httpLogFile := c.String("http-log-file") - httpLogMaxSize := c.Int("http-log-max-size") - httpLogMaxBackups := c.Int("http-log-max-backups") 
- httpLogMaxAge := c.Int("http-log-max-age") - httpLogCompress := c.Bool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger( - httpLogFile, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - node := &index.Node{ - Id: nodeId, - BindAddress: nodeAddr, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddr, - GrpcGatewayAddress: grpcGatewayAddr, - HttpAddress: httpAddr, - }, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/main.go b/cmd/blast/main.go deleted file mode 100644 index 7183f17..0000000 --- a/cmd/blast/main.go +++ /dev/null @@ -1,1010 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - "path" - - "github.com/blevesearch/bleve" - "github.com/mosuka/blast/version" - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = path.Base(os.Args[0]) - app.Usage = "Command for blast" - app.Version = version.Version - app.Authors = []cli.Author{ - { - Name: "mosuka", - Email: "minoru.osuka@gmail.com", - }, - } - - app.Commands = []cli.Command{ - { - Name: "manager", - Usage: "Command for blast manager", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast manager", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_MANAGER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - EnvVar: "BLAST_MANAGER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6100", - EnvVar: "BLAST_MANAGER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8100", - EnvVar: "BLAST_MANAGER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_MANAGER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2100", - EnvVar: "BLAST_MANAGER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster communications", - }, - cli.StringFlag{ - Name: 
"data-dir", - Value: "/tmp/blast/manager", - EnvVar: "BLAST_MANAGER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: "BLAST_MANAGER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_MANAGER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_MANAGER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_MANAGER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_MANAGER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_MANAGER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_MANAGER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a 
log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_MANAGER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_MANAGER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_MANAGER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_MANAGER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: managerStart, - }, - { - Name: "node", - Usage: "Command for blast manager node", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get node information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerNodeInfo, - }, - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness 
probe", - }, - }, - Action: managerNodeHealthCheck, - }, - }, - }, - { - Name: "cluster", - Usage: "Command for blast manager cluster", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get cluster information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerClusterInfo, - }, - { - Name: "watch", - Usage: "Watch peers", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: managerClusterWatch, - }, - { - Name: "leave", - Usage: "Leave the manager from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "The gRPC listen address", - }, - }, - Action: managerClusterLeave, - }, - }, - }, - { - Name: "get", - Usage: "Get data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: "[key]", - Action: managerGet, - }, - { - Name: "set", - Usage: "Set data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Value file", - }, - }, - ArgsUsage: "[key] [value]", - Action: managerSet, - }, - { - Name: "delete", - Usage: "Delete data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: "[key]", - Action: managerDelete, - }, - { - Name: "watch", - Usage: "Watch data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - ArgsUsage: 
"[key]", - Action: managerWatch, - }, - { - Name: "snapshot", - Usage: "Snapshot the data", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5100", - Usage: "The gRPC listen address", - }, - }, - Action: managerSnapshot, - }, - }, - }, - { - Name: "indexer", - Usage: "Command for blast indexer", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast indexer", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_MANAGER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster manager to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - EnvVar: "BLAST_INDEXER_SHARD_ID", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - EnvVar: "BLAST_INDEXER_PEER_GRPC_ADDRESS", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - EnvVar: "BLAST_INDEXER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6000", - EnvVar: "BLAST_INDEXER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8000", - EnvVar: "BLAST_INDEXER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - EnvVar: "BLAST_INDEXER_NODE_ID", - Usage: "Unique ID to identify the node", - }, - cli.StringFlag{ - Name: "node-address", - Value: ":2000", - EnvVar: "BLAST_INDEXER_NODE_ADDRESS", - Usage: "The address that should be bound to for internal cluster communications", - }, - cli.StringFlag{ - Name: "data-dir", - Value: "/tmp/blast/indexer", - EnvVar: "BLAST_INDEXER_DATA_DIR", - Usage: "A data directory for the node to store state", - }, - cli.StringFlag{ - Name: "raft-storage-type", - Value: "boltdb", - EnvVar: 
"BLAST_INDEXER_RAFT_STORAGE_TYPE", - Usage: "Storage type of the database that stores the state", - }, - cli.StringFlag{ - Name: "index-mapping-file", - Value: "", - EnvVar: "BLAST_INDEXER_INDEX_MAPPING_FILE", - Usage: "An index mapping file to use", - }, - cli.StringFlag{ - Name: "index-type", - Value: bleve.Config.DefaultIndexType, - EnvVar: "BLAST_INDEXER_INDEX_TYPE", - Usage: "An index type to use", - }, - cli.StringFlag{ - Name: "index-storage-type", - Value: bleve.Config.DefaultKVStore, - EnvVar: "BLAST_INDEXER_INDEX_STORAGE_TYPE", - Usage: "An index storage type to use", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_INDEXER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_INDEXER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_INDEXER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: 
"grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_INDEXER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_INDEXER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_INDEXER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_INDEXER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access log", - }, - }, - Action: indexerStart, - }, - { - Name: "node", - Usage: "Command for blast indexer node", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get node information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerNodeInfo, - }, - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness probe", - }, - }, - Action: indexerNodeHealth, - }, - }, - }, - { - Name: "cluster", - Usage: "Command for blast indexer cluster", - Subcommands: []cli.Command{ - { - Name: "info", - Usage: "Get cluster 
information", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerClusterInfo, - }, - { - Name: "watch", - Usage: "Watch cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC address of the node for which to retrieve the node information", - }, - }, - Action: indexerClusterWatch, - }, - { - Name: "leave", - Usage: "Leave the indexer from the cluster", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: "", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "shard-id", - Value: "", - Usage: "Shard ID registered in the existing cluster to be joined", - }, - cli.StringFlag{ - Name: "peer-grpc-address", - Value: "", - Usage: "The gRPC address of the peer node that exists in the cluster to be joined", - }, - cli.StringFlag{ - Name: "node-id", - Value: "", - Usage: "Node ID to delete", - }, - }, - Action: indexerClusterLeave, - }, - }, - }, - { - Name: "get", - Usage: "Get document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document ID]", - Action: indexerGet, - }, - { - Name: "index", - Usage: "Index document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document list", - }, - cli.BoolFlag{ - Name: "bulk", - Usage: "Bulk indexing", - }, - }, - ArgsUsage: "[document ID] [document fields]", - Action: indexerIndex, - }, - { - Name: "delete", - Usage: "Delete document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", 
- }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document ID]", - Action: indexerDelete, - }, - { - Name: "search", - Usage: "Search document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Search request", - }, - }, - ArgsUsage: "[search request]", - Action: indexerSearch, - }, - { - Name: "snapshot", - Usage: "Snapshot", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5000", - Usage: "The gRPC listen address", - }, - }, - Action: indexerSnapshot, - }, - }, - }, - { - Name: "dispatcher", - Usage: "Command for blast dispatcher", - Subcommands: []cli.Command{ - { - Name: "start", - Usage: "Start blast dispatcher", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "manager-grpc-address", - Value: ":5100", - EnvVar: "BLAST_DISPATCHER_CLUSTER_GRPC_ADDRESS", - Usage: "The gRPC address of the existing cluster node to be joined", - }, - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - EnvVar: "BLAST_DISPATCHER_GRPC_ADDRESS", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "grpc-gateway-address", - Value: ":6200", - EnvVar: "BLAST_DISPATCHER_GRPC_GATEWAY_ADDRESS", - Usage: "The gRPC gateway listen address", - }, - cli.StringFlag{ - Name: "http-address", - Value: ":8200", - EnvVar: "BLAST_DISPATCHER_HTTP_ADDRESS", - Usage: "HTTP listen address", - }, - cli.StringFlag{ - Name: "log-level", - Value: "INFO", - EnvVar: "BLAST_DISPATCHER_LOG_LEVEL", - Usage: "Log level", - }, - cli.StringFlag{ - Name: "log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_LOG_FILE", - Usage: "Log file", - }, - cli.IntFlag{ - Name: "log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "log-max-backups", - Value: 3, - EnvVar: 
"BLAST_DISPATCHER_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "log-compress", - EnvVar: "BLAST_DISPATCHER_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "grpc-log-level", - Value: "WARN", - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_LEVEL", - Usage: "gRPC log level", - }, - cli.StringFlag{ - Name: "grpc-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_FILE", - Usage: "gRPC log file", - }, - cli.IntFlag{ - Name: "grpc-log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_SIZE", - Usage: "Max size of a log file (megabytes)", - }, - cli.IntFlag{ - Name: "grpc-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_BACKUPS", - Usage: "Max backup count of log files", - }, - cli.IntFlag{ - Name: "grpc-log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_MAX_AGE", - Usage: "Max age of a log file (days)", - }, - cli.BoolFlag{ - Name: "grpc-log-compress", - EnvVar: "BLAST_DISPATCHER_GRPC_LOG_COMPRESS", - Usage: "Compress a log file", - }, - cli.StringFlag{ - Name: "http-log-file", - Value: os.Stderr.Name(), - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_FILE", - Usage: "HTTP access log file", - }, - cli.IntFlag{ - Name: "http-log-max-size", - Value: 500, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_SIZE", - Usage: "Max size of a HTTP access log file (megabytes)", - }, - cli.IntFlag{ - Name: "http-log-max-backups", - Value: 3, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_BACKUPS", - Usage: "Max backup count of HTTP access log files", - }, - cli.IntFlag{ - Name: "http-log-max-age", - Value: 30, - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_MAX_AGE", - Usage: "Max age of a HTTP access log file (days)", - }, - cli.BoolFlag{ - Name: "http-log-compress", - EnvVar: "BLAST_DISPATCHER_HTTP_LOG_COMPRESS", - Usage: "Compress a HTTP access 
log", - }, - }, - Action: dispatcherStart, - }, - { - Name: "node", - Usage: "Command for blast dispatcher node", - Subcommands: []cli.Command{ - { - Name: "healthcheck", - Usage: "Health check the node", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.BoolFlag{ - Name: "healthiness", - Usage: "healthiness probe", - }, - cli.BoolFlag{ - Name: "liveness", - Usage: "Liveness probe", - }, - cli.BoolFlag{ - Name: "readiness", - Usage: "Readiness probe", - }, - }, - Action: dispatcherNodeHealth, - }, - }, - }, - { - Name: "get", - Usage: "Get document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document IDs]", - Action: dispatcherGet, - }, - { - Name: "index", - Usage: "Index document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document list", - }, - cli.BoolFlag{ - Name: "bulk", - Usage: "Bulk indexing", - }, - }, - ArgsUsage: "[document ID] [document fields]", - Action: dispatcherIndex, - }, - { - Name: "delete", - Usage: "Delete document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Document ID list", - }, - }, - ArgsUsage: "[document IDs]", - Action: dispatcherDelete, - }, - { - Name: "search", - Usage: "Search document(s)", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "grpc-address", - Value: ":5200", - Usage: "The gRPC listen address", - }, - cli.StringFlag{ - Name: "file", - Value: "", - Usage: "Search request", - }, - }, - ArgsUsage: "[search request]", - Action: dispatcherSearch, - }, - }, - }, - } - - cli.HelpFlag = 
cli.BoolFlag{ - Name: "help, h", - Usage: "Show this message", - } - cli.VersionFlag = cli.BoolFlag{ - Name: "version, v", - Usage: "Print the version", - } - - err := app.Run(os.Args) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } -} diff --git a/cmd/blast/manager_cluster_info.go b/cmd/blast/manager_cluster_info.go deleted file mode 100644 index 8b0a25a..0000000 --- a/cmd/blast/manager_cluster_info.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerClusterInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go deleted file mode 100644 index 12ae8e1..0000000 --- a/cmd/blast/manager_cluster_leave.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerClusterLeave(c *cli.Context) error { - peerGrpcAddr := c.String("peer-grpc-address") - - if peerGrpcAddr != "" { - // get grpc address of leader node - } - - nodeId := c.String("node-id") - - client, err := manager.NewGRPCClient(peerGrpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.ClusterLeaveRequest{ - Id: nodeId, - } - res, err := client.ClusterLeave(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_cluster_watch.go b/cmd/blast/manager_cluster_watch.go deleted file mode 100644 index 320965c..0000000 --- a/cmd/blast/manager_cluster_watch.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerClusterWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - marshaler := manager.JsonMarshaler{} - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - return err - } - resp := &management.ClusterWatchResponse{ - Event: 0, - Node: nil, - Cluster: res.Cluster, - } - resBytes, err := marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - watchClient, err := client.ClusterWatch(req) - if err != nil { - return err - } - - for { - resp, err = watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - resBytes, err = marshaler.Marshal(resp) - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - } - - return nil -} diff --git a/cmd/blast/manager_delete.go b/cmd/blast/manager_delete.go deleted file mode 100644 index 0caf391..0000000 --- a/cmd/blast/manager_delete.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerDelete(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.DeleteRequest{ - Key: key, - } - res, err := client.Delete(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_get.go b/cmd/blast/manager_get.go deleted file mode 100644 index 6b41f0e..0000000 --- a/cmd/blast/manager_get.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerGet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.GetRequest{ - Key: key, - } - - res, err := client.Get(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_node_health.go b/cmd/blast/manager_node_health.go deleted file mode 100644 index e2eb209..0000000 --- a/cmd/blast/manager_node_health.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerNodeHealthCheck(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - healthiness := c.Bool("healthiness") - liveness := c.Bool("liveness") - readiness := c.Bool("readiness") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - var res *management.NodeHealthCheckResponse - if healthiness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} - } - } else if liveness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_DEAD} - } - } else if readiness { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_NOT_READY} - } - } else { - req := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - res, err = client.NodeHealthCheck(req) - if err != nil { - res = &management.NodeHealthCheckResponse{State: management.NodeHealthCheckResponse_UNHEALTHY} - } - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go deleted 
file mode 100644 index ca190e1..0000000 --- a/cmd/blast/manager_node_info.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerNodeInfo(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.NodeInfo(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_set.go b/cmd/blast/manager_set.go deleted file mode 100644 index f7bdac8..0000000 --- a/cmd/blast/manager_set.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerSet(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - if key == "" { - err := errors.New("key argument must be set") - return err - } - - valueStr := c.Args().Get(1) - if valueStr == "" { - err := errors.New("value argument must be set") - return err - } - - var value interface{} - err := json.Unmarshal([]byte(valueStr), &value) - if err != nil { - switch err.(type) { - case *json.SyntaxError: - value = valueStr - default: - return err - } - } - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(value, valueAny) - if err != nil { - return err - } - - req := &management.SetRequest{ - Key: key, - Value: valueAny, - } - - res, err := client.Set(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_snapshot.go b/cmd/blast/manager_snapshot.go deleted file mode 100644 index f252e34..0000000 --- 
a/cmd/blast/manager_snapshot.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/manager" - "github.com/urfave/cli" -) - -func managerSnapshot(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &empty.Empty{} - res, err := client.Snapshot(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - resBytes, err := marshaler.Marshal(res) - if err != nil { - return err - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(resBytes))) - - return nil -} diff --git a/cmd/blast/manager_start.go b/cmd/blast/manager_start.go deleted file mode 100644 index 94a7445..0000000 --- a/cmd/blast/manager_start.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/blevesearch/bleve/mapping" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerStart(c *cli.Context) error { - peerGrpcAddr := c.String("peer-grpc-address") - - grpcAddr := c.String("grpc-address") - grpcGatewayAddr := c.String("grpc-gateway-address") - httpAddr := c.String("http-address") - - nodeId := c.String("node-id") - nodeAddr := c.String("node-address") - dataDir := c.String("data-dir") - raftStorageType := c.String("raft-storage-type") - - indexMappingFile := c.String("index-mapping-file") - indexType := c.String("index-type") - indexStorageType := c.String("index-storage-type") - - logLevel := c.String("log-level") - logFilename := c.String("log-file") - logMaxSize := c.Int("log-max-size") - logMaxBackups := c.Int("log-max-backups") - logMaxAge := c.Int("log-max-age") - logCompress := c.Bool("log-compress") - - grpcLogLevel := c.String("grpc-log-level") - grpcLogFilename := c.String("grpc-log-file") - grpcLogMaxSize := c.Int("grpc-log-max-size") - grpcLogMaxBackups := c.Int("grpc-log-max-backups") - grpcLogMaxAge := c.Int("grpc-log-max-age") - grpcLogCompress := c.Bool("grpc-log-compress") - - httpLogFilename := c.String("http-log-file") - httpLogMaxSize := c.Int("http-log-max-size") - httpLogMaxBackups := c.Int("http-log-max-backups") - httpLogMaxAge := c.Int("http-log-max-age") - httpLogCompress := 
c.Bool("http-log-compress") - - // create logger - logger := logutils.NewLogger( - logLevel, - logFilename, - logMaxSize, - logMaxBackups, - logMaxAge, - logCompress, - ) - - // create logger - grpcLogger := logutils.NewGRPCLogger( - grpcLogLevel, - grpcLogFilename, - grpcLogMaxSize, - grpcLogMaxBackups, - grpcLogMaxAge, - grpcLogCompress, - ) - - // create HTTP access logger - httpLogger := logutils.NewApacheCombinedLogger( - httpLogFilename, - httpLogMaxSize, - httpLogMaxBackups, - httpLogMaxAge, - httpLogCompress, - ) - - node := &management.Node{ - Id: nodeId, - BindAddress: nodeAddr, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddr, - GrpcGatewayAddress: grpcGatewayAddr, - HttpAddress: httpAddr, - }, - } - - var err error - - // create index mapping - var indexMapping *mapping.IndexMappingImpl - if indexMappingFile != "" { - indexMapping, err = indexutils.NewIndexMappingFromFile(indexMappingFile) - if err != nil { - return err - } - } else { - indexMapping = mapping.NewIndexMapping() - } - - svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger) - if err != nil { - return err - } - - quitCh := make(chan os.Signal, 1) - signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - go svr.Start() - - <-quitCh - - svr.Stop() - - return nil -} diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go deleted file mode 100644 index ff010df..0000000 --- a/cmd/blast/manager_watch.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/management" - "github.com/urfave/cli" -) - -func managerWatch(c *cli.Context) error { - grpcAddr := c.String("grpc-address") - - key := c.Args().Get(0) - - client, err := manager.NewGRPCClient(grpcAddr) - if err != nil { - return err - } - defer func() { - err := client.Close() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - }() - - req := &management.WatchRequest{ - Key: key, - } - watchClient, err := client.Watch(req) - if err != nil { - return err - } - - marshaler := manager.JsonMarshaler{} - - for { - resp, err := watchClient.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Println(err.Error()) - break - } - - respBytes, err := marshaler.Marshal(resp) - if err != nil { - log.Println(err.Error()) - break - } - - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(respBytes))) - } - - return nil -} diff --git a/cmd/bulk_delete.go b/cmd/bulk_delete.go new file mode 100644 index 0000000..603eeb9 --- /dev/null +++ b/cmd/bulk_delete.go @@ -0,0 +1,129 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "strings" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkDeleteCmd = &cobra.Command{ + Use: "bulk-delete", + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + 
grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + req := &protobuf.BulkDeleteRequest{ + Requests: make([]*protobuf.DeleteRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + count, err := c.BulkDelete(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkDeleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkDeleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + bulkDeleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkDeleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkDeleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkDeleteCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkDeleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkDeleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkDeleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/bulk_index.go b/cmd/bulk_index.go new file mode 100644 index 0000000..56293b0 --- /dev/null +++ b/cmd/bulk_index.go @@ -0,0 +1,135 @@ +package cmd + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + bulkIndexCmd = &cobra.Command{ + Use: "bulk-index", + Short: "Index documents in bulk", + Long: "Index documents in bulk", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + m := marshaler.BlastMarshaler{} + + req := &protobuf.BulkIndexRequest{ + Requests: make([]*protobuf.SetRequest, 0), + } + + var reader *bufio.Reader + if file != "" { + // from file + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + reader = bufio.NewReader(f) + } else { + // from stdin + reader = bufio.NewReader(os.Stdin) + } + + for { 
+ docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + err := m.Unmarshal(docBytes, r) + if err != nil { + continue + } + req.Requests = append(req.Requests, r) + } + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + count, err := c.BulkIndex(req) + if err != nil { + return err + } + + fmt.Println(count) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(bulkIndexCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + bulkIndexCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + bulkIndexCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + bulkIndexCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + bulkIndexCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + bulkIndexCmd.PersistentFlags().StringVar(&file, "file", "", "path to the file that documents have written in NDJSON(JSONL) format") + + _ = viper.BindPFlag("grpc_address", bulkIndexCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", bulkIndexCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", bulkIndexCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/cluster.go b/cmd/cluster.go new file mode 100644 index 0000000..ef78f42 --- /dev/null +++ b/cmd/cluster.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + clusterCmd = &cobra.Command{ + Use: "cluster", + Short: "Get the cluster info", + Long: "Get the cluster info", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Cluster() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(clusterCmd) + + cobra.OnInitialize(func() { + if 
configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + clusterCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + clusterCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + clusterCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + clusterCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", clusterCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", clusterCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", clusterCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/delete.go b/cmd/delete.go new file mode 100644 index 0000000..ea21b04 --- /dev/null +++ b/cmd/delete.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + deleteCmd = &cobra.Command{ + Use: "delete ID", + Args: cobra.ExactArgs(1), + Short: "Delete a document", + Long: "Delete a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = 
viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.DeleteRequest{ + Id: id, + } + + if err := c.Delete(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(deleteCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + deleteCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + deleteCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + deleteCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + deleteCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", deleteCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", deleteCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", deleteCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/get.go b/cmd/get.go new file mode 100644 index 0000000..99a62c0 --- /dev/null +++ b/cmd/get.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + getCmd = &cobra.Command{ + Use: "get ID", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.GetRequest{ + Id: id, + } + + resp, err := c.Get(req) + if err != nil { + return err + } + + m := marshaler.BlastMarshaler{} + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(getCmd) + + cobra.OnInitialize(func() { + if 
configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + getCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + getCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + getCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + getCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", getCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", getCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", getCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/healthcheck.go b/cmd/healthcheck.go new file mode 100644 index 0000000..ffe28a6 --- /dev/null +++ b/cmd/healthcheck.go @@ -0,0 +1,100 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + healthCheckCmd = &cobra.Command{ + Use: "healthcheck", + Short: "Health check a node", + Long: "Health check a node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = 
viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + lResp, err := c.LivenessCheck() + if err != nil { + return err + } + + rResp, err := c.ReadinessCheck() + if err != nil { + return err + } + + resp := map[string]bool{ + "liveness": lResp.Alive, + "readiness:": rResp.Ready, + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(healthCheckCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + healthCheckCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + healthCheckCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + healthCheckCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + healthCheckCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", healthCheckCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", healthCheckCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", healthCheckCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/join.go b/cmd/join.go new file mode 100644 index 0000000..81bd84d --- /dev/null +++ b/cmd/join.go @@ -0,0 +1,104 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + joinCmd = &cobra.Command{ + Use: "join ID GRPC_ADDRESS", + Args: cobra.ExactArgs(2), + Short: "Join a node to the cluster", + Long: "Join a node to the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + targetGrpcAddress := args[1] + + t, err := client.NewGRPCClientWithContextTLS(targetGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = t.Close() + }() + + nodeResp, err := t.Node() + if err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.JoinRequest{ + Id: id, 
+ Node: nodeResp.Node, + } + + if err := c.Join(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(joinCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + joinCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + joinCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + joinCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + joinCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", joinCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", joinCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", joinCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/leave.go b/cmd/leave.go new file mode 100644 index 0000000..42d8ffa --- /dev/null +++ b/cmd/leave.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + leaveCmd = &cobra.Command{ + Use: "leave ID", + Args: cobra.ExactArgs(1), + Short: "Leave a node from the cluster", + 
Long: "Leave a node from the cluster", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &protobuf.LeaveRequest{ + Id: id, + } + + if err := c.Leave(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(leaveCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in config search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + leaveCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + leaveCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + leaveCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + leaveCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", leaveCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", leaveCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", leaveCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/mapping.go b/cmd/mapping.go new file mode 100644 index 0000000..bbf116d --- /dev/null +++ b/cmd/mapping.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + mappingCmd = &cobra.Command{ + Use: "mapping", + Short: "Get the index mapping", + Long: "Get the index mapping", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Mapping() + if err != nil { + return err + } + + fmt.Println(string(resp.Mapping)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(mappingCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + 
viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + mappingCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + mappingCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + mappingCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + mappingCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", mappingCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", mappingCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", mappingCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/metrics.go b/cmd/metrics.go new file mode 100644 index 0000000..425d564 --- /dev/null +++ b/cmd/metrics.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + metricsCmd = &cobra.Command{ + Use: "metrics", + Short: "Get the node metrics", + Long: "Get the node metrics in Prometheus exposition format", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err 
:= c.Metrics() + if err != nil { + return err + } + + fmt.Println(string(resp.Metrics)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(metricsCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + metricsCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + metricsCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + metricsCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + metricsCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", metricsCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", metricsCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", metricsCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000..572c512 --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + nodeCmd = &cobra.Command{ + Use: "node", + Short: "Get the node info", + Long: "Get the node info", + RunE: func(cmd 
*cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Node() + if err != nil { + return err + } + + respBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(nodeCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + nodeCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + nodeCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + nodeCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + nodeCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", nodeCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", nodeCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", nodeCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..f2c7120 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +var ( + rootCmd = &cobra.Command{ + Use: "blast", + Short: "The lightweight distributed search server", + Long: "The lightweight distributed search server", + } +) + +func Execute() error { + return rootCmd.Execute() +} diff --git a/cmd/search.go b/cmd/search.go new file mode 100644 index 0000000..e62b15b --- /dev/null +++ b/cmd/search.go @@ -0,0 +1,101 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + searchCmd = &cobra.Command{ + Use: "search REQUEST", + Args: cobra.ExactArgs(1), + Short: "Get a document", + Long: "Get a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + searchRequest := args[0] + + m := marshaler.BlastMarshaler{} + + req := &protobuf.SearchRequest{} + if err := m.Unmarshal([]byte(searchRequest), req); 
err != nil { + return err + } + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + resp, err := c.Search(req) + if err != nil { + return err + } + + respBytes, err := m.Marshal(resp) + if err != nil { + return err + } + + fmt.Println(string(respBytes)) + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(searchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + searchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. 
if omitted, blast.yaml in /etc and home directory will be searched") + searchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + searchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + searchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", searchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", searchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", searchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/set.go b/cmd/set.go new file mode 100644 index 0000000..b765bd7 --- /dev/null +++ b/cmd/set.go @@ -0,0 +1,94 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + setCmd = &cobra.Command{ + Use: "set ID FIELDS", + Args: cobra.ExactArgs(2), + Short: "Set a document", + Long: "Set a document", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + id := args[0] + fields := args[1] + + req := &protobuf.SetRequest{} + m := marshaler.BlastMarshaler{} + if err := m.Unmarshal([]byte(fields), req); err != nil { + return err + } + req.Id = id + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Set(req); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(setCmd) + + cobra.OnInitialize(func() { + if configFile != "" 
{ + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + setCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + setCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + setCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + setCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", setCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", setCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", setCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/snapshot.go b/cmd/snapshot.go new file mode 100644 index 0000000..2e76298 --- /dev/null +++ b/cmd/snapshot.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + snapshotCmd = &cobra.Command{ + Use: "snapshot", + Short: "Create a snapshot", + Long: "Create a snapshot which is full-volume copy of data stored on the node", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") 
+ + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + if err := c.Snapshot(); err != nil { + return err + } + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(snapshotCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + snapshotCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + snapshotCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + snapshotCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + snapshotCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", snapshotCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", snapshotCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", snapshotCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/cmd/start.go b/cmd/start.go new file mode 100644 index 0000000..8f4e6ca --- /dev/null +++ b/cmd/start.go @@ -0,0 +1,211 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + homedir "github.com/mitchellh/go-homedir" + 
"github.com/mosuka/blast/client" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/server" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + startCmd = &cobra.Command{ + Use: "start", + Short: "Start the index server", + Long: "Start the index server", + RunE: func(cmd *cobra.Command, args []string) error { + id = viper.GetString("id") + raftAddress = viper.GetString("raft_address") + grpcAddress = viper.GetString("grpc_address") + httpAddress = viper.GetString("http_address") + dataDirectory = viper.GetString("data_directory") + peerGrpcAddress = viper.GetString("peer_grpc_address") + + mappingFile = viper.GetString("mapping_file") + + certificateFile = viper.GetString("certificate_file") + keyFile = viper.GetString("key_file") + commonName = viper.GetString("common_name") + + logLevel = viper.GetString("log_level") + logFile = viper.GetString("log_file") + logMaxSize = viper.GetInt("log_max_size") + logMaxBackups = viper.GetInt("log_max_backups") + logMaxAge = viper.GetInt("log_max_age") + logCompress = viper.GetBool("log_compress") + + logger := log.NewLogger( + logLevel, + logFile, + logMaxSize, + logMaxBackups, + logMaxAge, + logCompress, + ) + + bootstrap := peerGrpcAddress == "" || peerGrpcAddress == grpcAddress + + indexMapping := mapping.NewIndexMapping() + if mappingFile != "" { + var err error + if indexMapping, err = mapping.NewIndexMappingFromFile(mappingFile); err != nil { + return err + } + } + + raftServer, err := server.NewRaftServer(id, raftAddress, dataDirectory, indexMapping, bootstrap, logger) + if err != nil { + return err + } + + grpcServer, err := server.NewGRPCServer(grpcAddress, raftServer, certificateFile, keyFile, commonName, logger) + if err != nil { + return err + } + + grpcGateway, err := server.NewGRPCGateway(httpAddress, grpcAddress, certificateFile, keyFile, commonName, logger) + if err != nil { + return err + } + + quitCh := 
make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + if err := raftServer.Start(); err != nil { + return err + } + + if err := grpcServer.Start(); err != nil { + return err + } + + if err := grpcGateway.Start(); err != nil { + return err + } + + // wait for detect leader if it's bootstrap + if bootstrap { + timeout := 60 * time.Second + if err := raftServer.WaitForDetectLeader(timeout); err != nil { + return err + } + } + + // create gRPC client for joining node + var joinGrpcAddress string + if bootstrap { + joinGrpcAddress = grpcAddress + } else { + joinGrpcAddress = peerGrpcAddress + } + + c, err := client.NewGRPCClientWithContextTLS(joinGrpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + // join this node to the existing cluster + joinRequest := &protobuf.JoinRequest{ + Id: id, + Node: &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, + } + if err = c.Join(joinRequest); err != nil { + return err + } + + // wait for receiving signal + <-quitCh + + _ = grpcGateway.Stop() + _ = grpcServer.Stop() + _ = raftServer.Stop() + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(startCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + 
startCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + startCmd.PersistentFlags().StringVar(&id, "id", "node1", "node ID") + startCmd.PersistentFlags().StringVar(&raftAddress, "raft-address", ":7000", "Raft server listen address") + startCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + startCmd.PersistentFlags().StringVar(&httpAddress, "http-address", ":8000", "HTTP server listen address") + startCmd.PersistentFlags().StringVar(&dataDirectory, "data-directory", "/tmp/blast/data", "data directory which store the index and Raft logs") + startCmd.PersistentFlags().StringVar(&peerGrpcAddress, "peer-grpc-address", "", "listen address of the existing gRPC server in the joining cluster") + startCmd.PersistentFlags().StringVar(&mappingFile, "mapping-file", "", "path to the index mapping file") + startCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + startCmd.PersistentFlags().StringVar(&keyFile, "key-file", "", "path to the client server TLS key file") + startCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + startCmd.PersistentFlags().StringVar(&logLevel, "log-level", "INFO", "log level") + startCmd.PersistentFlags().StringVar(&logFile, "log-file", os.Stderr.Name(), "log file") + startCmd.PersistentFlags().IntVar(&logMaxSize, "log-max-size", 500, "max size of a log file in megabytes") + startCmd.PersistentFlags().IntVar(&logMaxBackups, "log-max-backups", 3, "max backup count of log files") + startCmd.PersistentFlags().IntVar(&logMaxAge, "log-max-age", 30, "max age of a log file in days") + startCmd.PersistentFlags().BoolVar(&logCompress, "log-compress", false, "compress a log file") + + _ = viper.BindPFlag("id", startCmd.PersistentFlags().Lookup("id")) + _ = viper.BindPFlag("raft_address", 
startCmd.PersistentFlags().Lookup("raft-address")) + _ = viper.BindPFlag("grpc_address", startCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("http_address", startCmd.PersistentFlags().Lookup("http-address")) + _ = viper.BindPFlag("data_directory", startCmd.PersistentFlags().Lookup("data-directory")) + _ = viper.BindPFlag("peer_grpc_address", startCmd.PersistentFlags().Lookup("peer-grpc-address")) + _ = viper.BindPFlag("mapping_file", startCmd.PersistentFlags().Lookup("mapping-file")) + _ = viper.BindPFlag("certificate_file", startCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("key_file", startCmd.PersistentFlags().Lookup("key-file")) + _ = viper.BindPFlag("common_name", startCmd.PersistentFlags().Lookup("common-name")) + _ = viper.BindPFlag("log_level", startCmd.PersistentFlags().Lookup("log-level")) + _ = viper.BindPFlag("log_max_size", startCmd.PersistentFlags().Lookup("log-max-size")) + _ = viper.BindPFlag("log_max_backups", startCmd.PersistentFlags().Lookup("log-max-backups")) + _ = viper.BindPFlag("log_max_age", startCmd.PersistentFlags().Lookup("log-max-age")) + _ = viper.BindPFlag("log_compress", startCmd.PersistentFlags().Lookup("log-compress")) +} diff --git a/cmd/variables.go b/cmd/variables.go new file mode 100644 index 0000000..8022742 --- /dev/null +++ b/cmd/variables.go @@ -0,0 +1,22 @@ +package cmd + +var ( + configFile string + id string + raftAddress string + grpcAddress string + httpAddress string + dataDirectory string + peerGrpcAddress string + mappingFile string + certificateFile string + keyFile string + commonName string + file string + logLevel string + logFile string + logMaxSize int + logMaxBackups int + logMaxAge int + logCompress bool +) diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 0000000..01d8fa1 --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "fmt" + + "github.com/mosuka/blast/version" + "github.com/spf13/cobra" +) + +var ( + 
versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number", + Long: "Print the version number", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Printf("version: %s\n", version.Version) + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(versionCmd) +} diff --git a/cmd/watch.go b/cmd/watch.go new file mode 100644 index 0000000..da6be9f --- /dev/null +++ b/cmd/watch.go @@ -0,0 +1,157 @@ +package cmd + +import ( + "context" + "fmt" + "io" + "os" + "os/signal" + "syscall" + + "github.com/golang/protobuf/ptypes/empty" + homedir "github.com/mitchellh/go-homedir" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + watchCmd = &cobra.Command{ + Use: "watch", + Short: "Watch a node updates", + Long: "Watch a node updates", + RunE: func(cmd *cobra.Command, args []string) error { + grpcAddress = viper.GetString("grpc_address") + + certificateFile = viper.GetString("certificate_file") + commonName = viper.GetString("common_name") + + c, err := client.NewGRPCClientWithContextTLS(grpcAddress, context.Background(), certificateFile, commonName) + if err != nil { + return err + } + defer func() { + _ = c.Close() + }() + + req := &empty.Empty{} + watchClient, err := c.Watch(req) + if err != nil { + return err + } + + go func() { + for { + resp, err := watchClient.Recv() + if err == io.EOF { + break + } + if err != nil { + break + } + + switch resp.Event.Type { + case protobuf.Event_Join: + eventReq := &protobuf.SetMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.SetMetadataRequest) + } + } + fmt.Printf("%s, %v\n", 
resp.Event.Type.String(), eventReq) + case protobuf.Event_Leave: + eventReq := &protobuf.DeleteMetadataRequest{} + if eventData, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if eventData == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + eventReq = eventData.(*protobuf.DeleteMetadataRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), eventReq) + case protobuf.Event_Set: + putRequest := &protobuf.SetRequest{} + if putRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if putRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + putRequest = putRequestInstance.(*protobuf.SetRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), putRequest) + case protobuf.Event_Delete: + deleteRequest := &protobuf.DeleteRequest{} + if deleteRequestInstance, err := marshaler.MarshalAny(resp.Event.Data); err != nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, %v", resp.Event.Type.String(), err)) + } else { + if deleteRequestInstance == nil { + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf("%s, nil", resp.Event.Type.String())) + } else { + deleteRequest = deleteRequestInstance.(*protobuf.DeleteRequest) + } + } + fmt.Printf("%s, %v\n", resp.Event.Type.String(), deleteRequest) + } + } + }() + + quitCh := make(chan os.Signal, 1) + signal.Notify(quitCh, os.Kill, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + <-quitCh + + return nil + }, + } +) + +func init() { + rootCmd.AddCommand(watchCmd) + + cobra.OnInitialize(func() { + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + home, err := homedir.Dir() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, 
err) + os.Exit(1) + } + viper.AddConfigPath("/etc") + viper.AddConfigPath(home) + viper.SetConfigName("blast") + } + + viper.SetEnvPrefix("BLAST") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + switch err.(type) { + case viper.ConfigFileNotFoundError: + // config file does not found in search path + default: + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + }) + + watchCmd.PersistentFlags().StringVar(&configFile, "config-file", "", "config file. if omitted, blast.yaml in /etc and home directory will be searched") + watchCmd.PersistentFlags().StringVar(&grpcAddress, "grpc-address", ":9000", "gRPC server listen address") + watchCmd.PersistentFlags().StringVar(&certificateFile, "certificate-file", "", "path to the client server TLS certificate file") + watchCmd.PersistentFlags().StringVar(&commonName, "common-name", "", "certificate common name") + + _ = viper.BindPFlag("grpc_address", watchCmd.PersistentFlags().Lookup("grpc-address")) + _ = viper.BindPFlag("certificate_file", watchCmd.PersistentFlags().Lookup("certificate-file")) + _ = viper.BindPFlag("common_name", watchCmd.PersistentFlags().Lookup("common-name")) +} diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go deleted file mode 100644 index 5ca4658..0000000 --- a/dispatcher/grpc_client.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/distribute" - "google.golang.org/grpc" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client distribute.DistributeClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) - if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: distribute.NewDistributeClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *distribute.NodeHealthCheckRequest, opts ...grpc.CallOption) (*distribute.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) 
-} - -func (c *GRPCClient) Get(req *distribute.GetRequest, opts ...grpc.CallOption) (*distribute.GetResponse, error) { - return c.client.Get(c.ctx, req, opts...) -} - -func (c *GRPCClient) Index(req *distribute.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Index(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *distribute.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Delete(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkIndex(req *distribute.BulkIndexRequest, opts ...grpc.CallOption) (*distribute.BulkIndexResponse, error) { - return c.client.BulkIndex(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkDelete(req *distribute.BulkDeleteRequest, opts ...grpc.CallOption) (*distribute.BulkDeleteResponse, error) { - return c.client.BulkDelete(c.ctx, req, opts...) -} - -func (c *GRPCClient) Search(req *distribute.SearchRequest, opts ...grpc.CallOption) (*distribute.SearchResponse, error) { - return c.client.Search(c.ctx, req, opts...) -} diff --git a/dispatcher/grpc_gateway.go b/dispatcher/grpc_gateway.go deleted file mode 100644 index f962b4e..0000000 --- a/dispatcher/grpc_gateway.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *distribute.GetResponse: - value, err := protobuf.MarshalAny(v.(*distribute.GetResponse).Fields) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "fields": value, - }, - ) - case *distribute.SearchResponse: - value, err := protobuf.MarshalAny(v.(*distribute.SearchResponse).SearchResult) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "search_result": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". 
-func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.IndexRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - id, ok := tmpValue["id"].(string) - if ok { - v.(*distribute.IndexRequest).Id = id - } - - fields, ok := tmpValue["fields"] - if !ok { - return errors.New("value does not exist") - } - v.(*distribute.IndexRequest).Fields = &any.Any{} - return protobuf.UnmarshalAny(fields, v.(*distribute.IndexRequest).Fields) - case *distribute.SearchRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - var searchRequest *bleve.SearchRequest - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - v.(*distribute.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*distribute.SearchRequest).SearchRequest) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type JsonlMarshaler struct{} - -// ContentType always Returns "application/json". 
-func (*JsonlMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". -func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.BulkIndexRequest: - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - v.(*distribute.BulkIndexRequest).Documents = docs - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonlMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type TextMarshaler struct{} - -// ContentType always Returns "application/json". 
-func (*TextMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads text stream from "r". -func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *distribute.BulkDeleteRequest: - ids := make([]string, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - //idBytes, err := reader.ReadBytes('\n') - idBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - break - } - } - - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - } - v.(*distribute.BulkDeleteRequest).Ids = ids - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *TextMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), - runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := distribute.RegisterDistributeHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/dispatcher/grpc_server.go b/dispatcher/grpc_server.go deleted file mode 100644 index 7bc684e..0000000 --- a/dispatcher/grpc_server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/distribute" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service distribute.DistributeServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service distribute.DistributeServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - 
//grpc_recovery.UnaryServerInterceptor(), - )), - ) - - distribute.RegisterDistributeServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go deleted file mode 100644 index 0657119..0000000 --- a/dispatcher/grpc_service.go +++ /dev/null @@ -1,974 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package dispatcher - -import ( - "context" - "encoding/json" - "errors" - "hash/fnv" - "io" - "math/rand" - "sort" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/distribute" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/sortutils" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - managerGrpcAddress string - logger *zap.Logger - - managers *management.Cluster - managerClients map[string]*manager.GRPCClient - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} - - indexers map[string]*index.Cluster - indexerClients map[string]map[string]*indexer.GRPCClient - updateIndexersStopCh chan struct{} - updateIndexersDoneCh chan struct{} -} - -func NewGRPCService(managerGrpcAddress string, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - managerGrpcAddress: managerGrpcAddress, - logger: logger, - - managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - managerClients: make(map[string]*manager.GRPCClient, 0), - - indexers: make(map[string]*index.Cluster, 0), - indexerClients: make(map[string]map[string]*indexer.GRPCClient, 0), - }, nil -} - -func (s *GRPCService) Start() error { - var err error - s.managers, err = s.getManagerCluster(s.managerGrpcAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - for id, node := range s.managers.Nodes { - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) - } - s.managerClients[node.Id] = client - } - - 
s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - - s.logger.Info("start to update indexer cluster info") - go s.startUpdateIndexers(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - - s.logger.Info("stop to update indexer cluster info") - s.stopUpdateIndexers() - - return nil -} - -func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { - var client *manager.GRPCClient - - for id, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("id", id)) - continue - } - - if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { - var ok bool - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return res.Cluster, nil -} - -func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s 
*GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - // get client for manager from the list - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // create stream for watching cluster changes - req := &empty.Empty{} - stream, err := client.ClusterWatch(req) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Info("cluster has changed", zap.Any("resp", resp)) - switch resp.Event { - case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: - // add to cluster nodes - s.managers.Nodes[resp.Node.Id] = resp.Node - - // check node state - switch resp.Node.State { - case management.Node_UNKNOWN, management.Node_SHUTDOWN: - // close client - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - } - default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER - if resp.Node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := 
s.managerClients[resp.Node.Id]; !exist { - // create new client - s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } else { - if client.GetAddress() != resp.Node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - - // re-create new client - s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } - } - } - case management.ClusterWatchResponse_LEAVE: - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.managerClients, resp.Node.Id) - } - - if _, exist := s.managers.Nodes[resp.Node.Id]; exist { - delete(s.managers.Nodes, resp.Node.Id) - } - default: - s.logger.Debug("unknown event", zap.Any("event", resp.Event)) - continue - } - } - } -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range 
s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { - s.updateIndexersStopCh = make(chan struct{}) - s.updateIndexersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateIndexersDoneCh) - }() - - // get active client for manager - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - - // get initial indexers - req := &management.GetRequest{ - Key: "/cluster/shards", - } - res, err := client.Get(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - if res.Value == nil { - s.logger.Error("/cluster/shards is nil") - } - - shards, err := protobuf.MarshalAny(res.Value) - for shardId, shard := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shard) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - var cluster *index.Cluster - err = json.Unmarshal(shardBytes, &cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.indexers[shardId] = cluster - - for nodeId, node := range cluster.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - if _, exist := s.indexerClients[shardId]; !exist { - 
s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) - } - s.indexerClients[shardId][nodeId] = newClient - } - } - - for { - select { - case <-s.updateIndexersStopCh: - s.logger.Info("received a request to stop updating a indexer cluster") - return - default: - client, err = s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - watchReq := &management.WatchRequest{ - Key: "/cluster/shards/", - } - stream, err := client.Watch(watchReq) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a indexer cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - - getReq := &management.GetRequest{ - Key: "/cluster/shards/", - } - res, err := client.Get(getReq) - if err != nil { - s.logger.Error(err.Error()) - continue - } - if res.Value == nil { - s.logger.Error("/cluster/shards is nil") - continue - } - - shards, err := protobuf.MarshalAny(res.Value) - for shardId, shard := range *shards.(*map[string]interface{}) { - shardBytes, err := json.Marshal(shard) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - var cluster *index.Cluster - err = json.Unmarshal(shardBytes, &cluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - s.indexers[shardId] = cluster - - if _, exist := s.indexerClients[shardId]; !exist { - s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) - } - - // open clients for indexer nodes - for nodeId, node := range cluster.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := 
s.indexerClients[shardId][node.Id]; !exist { - // create new client - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.indexerClients[shardId][nodeId] = newClient - } else { - if client.GetAddress() != node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", node.Id)) - } - delete(s.indexerClients[shardId], node.Id) - - // re-create new client - newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.indexerClients[shardId][nodeId] = newClient - } - } - } - - // close clients for non-existent indexer nodes - for id, client := range s.indexerClients[shardId] { - if _, exist := s.indexers[shardId].Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.indexerClients[shardId], id) - } - } - } - } - } -} - -func (s *GRPCService) stopUpdateIndexers() { - s.logger.Info("close all indexer clients") - for clusterId, cluster := range s.indexerClients { - for id, client := range cluster { - s.logger.Debug("close indexer client", zap.String("cluster_id", clusterId), zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - } - - if s.updateIndexersStopCh != nil { - s.logger.Info("send a request to stop updating a index cluster") - close(s.updateIndexersStopCh) - } - - 
s.logger.Info("wait for the indexer cluster update to stop") - <-s.updateIndexersDoneCh - s.logger.Info("the indexer cluster update has been stopped") -} - -func (s *GRPCService) getIndexerClients() map[string]*indexer.GRPCClient { - indexerClients := make(map[string]*indexer.GRPCClient, 0) - - for clusterId, cluster := range s.indexerClients { - nodeIds := make([]string, 0) - for nodeId := range cluster { - nodeIds = append(nodeIds, nodeId) - } - - // pick a client at random - nodeId := nodeIds[rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(nodeIds))] - - indexerClients[clusterId] = s.indexerClients[clusterId][nodeId] - } - - return indexerClients -} - -func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeHealthCheckRequest) (*distribute.NodeHealthCheckResponse, error) { - resp := &distribute.NodeHealthCheckResponse{} - - switch req.Probe { - case distribute.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case distribute.NodeHealthCheckRequest_HEALTHINESS: - resp.State = distribute.NodeHealthCheckResponse_HEALTHY - case distribute.NodeHealthCheckRequest_LIVENESS: - resp.State = distribute.NodeHealthCheckResponse_ALIVE - case distribute.NodeHealthCheckRequest_READINESS: - resp.State = distribute.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) Get(ctx context.Context, req *distribute.GetRequest) (*distribute.GetResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - res *index.GetResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - 
for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { - // index documents - req := &index.GetRequest{ - Id: id, - } - res, err := client.Get(req) - - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: res, - err: err, - } - }(clusterId, client, req.Id, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - iRes := &index.GetResponse{} - for r := range respChan { - if r.res != nil { - iRes = r.res - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - resp := &distribute.GetResponse{ - Fields: iRes.Fields, - } - - return resp, nil -} - -func (s *GRPCService) docIdHash(docId string) uint64 { - hash := fnv.New64() - _, err := hash.Write([]byte(docId)) - if err != nil { - return 0 - } - - return hash.Sum64() -} - -func (s *GRPCService) Index(ctx context.Context, req *distribute.IndexRequest) (*empty.Empty, error) { - res := &empty.Empty{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - docIdHash := s.docIdHash(req.Id) - clusterNum := uint64(len(indexerClients)) - clusterId := clusterIds[int(docIdHash%clusterNum)] - - iReq := &index.IndexRequest{ - Id: req.Id, - Fields: req.Fields, - } - - res, err := indexerClients[clusterId].Index(iReq) - if err != nil { - s.logger.Error(err.Error()) - return res, status.Error(codes.Internal, err.Error()) - } - - return res, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *distribute.DeleteRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - 
clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, id string, respChan chan respVal) { - // index documents - iReq := &index.DeleteRequest{Id: id} - _, err := client.Delete(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - err: err, - } - }(clusterId, client, req.Id, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - for r := range respChan { - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkIndex(ctx context.Context, req *distribute.BulkIndexRequest) (*distribute.BulkIndexResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - // initialize document list for each cluster - docSet := make(map[string][]*index.Document, 0) - for _, clusterId := range clusterIds { - docSet[clusterId] = make([]*index.Document, 0) - } - - for _, doc := range req.Documents { - // distribute documents to each cluster based on document id - docIdHash := s.docIdHash(doc.Id) - clusterNum := uint64(len(indexerClients)) - clusterId := clusterIds[int(docIdHash%clusterNum)] - docSet[clusterId] = append(docSet[clusterId], doc) - } - - type respVal struct { - clusterId string - res *index.BulkIndexResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, docs := range docSet { - wg.Add(1) - go func(clusterId string, docs []*index.Document, respChan chan 
respVal) { - iReq := &index.BulkIndexRequest{ - Documents: docs, - } - iRes, err := indexerClients[clusterId].BulkIndex(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: iRes, - err: err, - } - }(clusterId, docs, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := 0 - for r := range respChan { - if r.res.Count >= 0 { - totalCount += int(r.res.Count) - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // response - return &distribute.BulkIndexResponse{ - Count: int32(totalCount), - }, nil -} - -func (s *GRPCService) BulkDelete(ctx context.Context, req *distribute.BulkDeleteRequest) (*distribute.BulkDeleteResponse, error) { - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - res *index.BulkDeleteResponse - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, ids []string, respChan chan respVal) { - // index documents - iReq := &index.BulkDeleteRequest{ - Ids: ids, - } - iRes, err := client.BulkDelete(iReq) - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - res: iRes, - err: err, - } - }(clusterId, client, req.Ids, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // summarize responses - totalCount := 0 - for r := range respChan { - if r.res.Count >= 0 { - totalCount += int(r.res.Count) - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - // response - return &distribute.BulkDeleteResponse{ - Count: int32(totalCount), - }, nil 
-} - -func (s *GRPCService) Search(ctx context.Context, req *distribute.SearchRequest) (*distribute.SearchResponse, error) { - start := time.Now() - - resp := &distribute.SearchResponse{} - - indexerClients := s.getIndexerClients() - - // cluster id list sorted by cluster id - clusterIds := make([]string, 0) - for clusterId := range indexerClients { - clusterIds = append(clusterIds, clusterId) - sort.Strings(clusterIds) - } - - type respVal struct { - clusterId string - searchResult *bleve.SearchResult - err error - } - - // create response channel - respChan := make(chan respVal, len(clusterIds)) - - // create search request - ins, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - searchRequest := ins.(*bleve.SearchRequest) - - // change to distributed search request - from := searchRequest.From - size := searchRequest.Size - searchRequest.From = 0 - searchRequest.Size = from + size - - wg := &sync.WaitGroup{} - for clusterId, client := range indexerClients { - wg.Add(1) - go func(clusterId string, client *indexer.GRPCClient, searchRequest *bleve.SearchRequest, respChan chan respVal) { - searchRequestAny := &any.Any{} - err := protobuf.UnmarshalAny(searchRequest, searchRequestAny) - if err != nil { - respChan <- respVal{ - clusterId: clusterId, - searchResult: nil, - err: err, - } - return - } - - iReq := &index.SearchRequest{ - SearchRequest: searchRequestAny, - } - - iRes, err := client.Search(iReq) - - searchResult, err := protobuf.MarshalAny(iRes.SearchResult) - if err != nil { - respChan <- respVal{ - clusterId: clusterId, - searchResult: nil, - err: err, - } - return - } - - wg.Done() - respChan <- respVal{ - clusterId: clusterId, - searchResult: searchResult.(*bleve.SearchResult), - err: err, - } - }(clusterId, client, searchRequest, respChan) - } - wg.Wait() - - // close response channel - close(respChan) - - // revert to original search request - searchRequest.From = from - 
searchRequest.Size = size - - // summarize responses - var searchResult *bleve.SearchResult - for r := range respChan { - if r.searchResult != nil { - if searchResult == nil { - searchResult = r.searchResult - } else { - searchResult.Merge(r.searchResult) - } - } - if r.err != nil { - s.logger.Error(r.err.Error(), zap.String("cluster_id", r.clusterId)) - } - } - - // handle case where no results were successful - if searchResult == nil { - searchResult = &bleve.SearchResult{ - Status: &bleve.SearchStatus{ - Errors: make(map[string]error), - }, - } - } - - // sort all hits with the requested order - if len(searchRequest.Sort) > 0 { - sorter := sortutils.NewMultiSearchHitSorter(searchRequest.Sort, searchResult.Hits) - sort.Sort(sorter) - } - - // now skip over the correct From - if searchRequest.From > 0 && len(searchResult.Hits) > searchRequest.From { - searchResult.Hits = searchResult.Hits[searchRequest.From:] - } else if searchRequest.From > 0 { - searchResult.Hits = search.DocumentMatchCollection{} - } - - // now trim to the correct size - if searchRequest.Size > 0 && len(searchResult.Hits) > searchRequest.Size { - searchResult.Hits = searchResult.Hits[0:searchRequest.Size] - } - - // fix up facets - for name, fr := range searchRequest.Facets { - searchResult.Facets.Fixup(name, fr.Size) - } - - // fix up original request - searchResult.Request = searchRequest - searchDuration := time.Since(start) - searchResult.Took = searchDuration - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, err - } - - // response - resp.SearchResult = searchResultAny - - return resp, nil -} diff --git a/dispatcher/http_handler.go b/dispatcher/http_handler.go deleted file mode 100644 index 3e2ec1b..0000000 --- a/dispatcher/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/dispatcher/http_server.go b/dispatcher/http_server.go deleted file mode 100644 index 5d3fbda..0000000 --- a/dispatcher/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the 
Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/dispatcher/server.go b/dispatcher/server.go deleted file mode 100644 index 529401e..0000000 --- a/dispatcher/server.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type Server struct { - managerGrpcAddress string - grpcAddress string - grpcGatewayAddress string - httpAddress string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(managerGrpcAddress string, grpcAddress string, grpcGatewayAddress string, httpAddress string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - managerGrpcAddress: managerGrpcAddress, - grpcAddress: grpcAddress, - grpcGatewayAddress: grpcGatewayAddress, - httpAddress: httpAddress, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // create gRPC service - s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.grpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.grpcGatewayAddress, s.grpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - 
s.httpServer, err = NewHTTPServer(s.httpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go deleted file mode 100644 index dd727d7..0000000 --- a/dispatcher/server_test.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dispatcher - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("INFO", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerPeerGrpcAddress1 := "" - managerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId1 := "manager1" - managerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir1 := testutils.TmpDir() - managerRaftStorageType1 := "boltdb" - - managerNode1 := &management.Node{ - Id: managerNodeId1, - BindAddress: managerBindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - GrpcGatewayAddress: managerGrpcGatewayAddress1, - HttpAddress: managerHttpAddress1, - }, - } - - managerIndexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if 
err != nil { - t.Fatalf("%v", err) - } - managerIndexType1 := "upside_down" - managerIndexStorageType1 := "boltdb" - - // create server - managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexMapping1, managerIndexType1, managerIndexStorageType1, logger.Named(managerNodeId1), grpcLogger.Named(managerNodeId1), httpAccessLogger) - defer func() { - if managerServer1 != nil { - managerServer1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - managerServer1.Start() - - // sleep - time.Sleep(5 * time.Second) - - managerPeerGrpcAddress2 := managerGrpcAddress1 - managerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId2 := "manager2" - managerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir2 := testutils.TmpDir() - managerRaftStorageType2 := "boltdb" - - managerNode2 := &management.Node{ - Id: managerNodeId2, - BindAddress: managerBindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - GrpcGatewayAddress: managerGrpcGatewayAddress2, - HttpAddress: managerHttpAddress2, - }, - } - - managerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - managerIndexType2 := "upside_down" - managerIndexStorageType2 := "boltdb" - - // create server - managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexMapping2, managerIndexType2, managerIndexStorageType2, logger.Named(managerNodeId2), grpcLogger.Named(managerNodeId2), httpAccessLogger) - defer func() { - if managerServer2 != nil { - managerServer2.Stop() - } - }() - if err != nil { - 
t.Fatalf("%v", err) - } - - // start server - managerServer2.Start() - - // sleep - time.Sleep(5 * time.Second) - - managerPeerGrpcAddress3 := managerGrpcAddress1 - managerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerNodeId3 := "manager3" - managerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - managerDataDir3 := testutils.TmpDir() - managerRaftStorageType3 := "boltdb" - - managerNode3 := &management.Node{ - Id: managerNodeId3, - BindAddress: managerBindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - GrpcGatewayAddress: managerGrpcGatewayAddress3, - HttpAddress: managerHttpAddress3, - }, - } - - managerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - managerIndexType3 := "upside_down" - managerIndexStorageType3 := "boltdb" - - // create server - managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexMapping3, managerIndexType3, managerIndexStorageType3, logger.Named(managerNodeId3), grpcLogger.Named(managerNodeId3), httpAccessLogger) - defer func() { - if managerServer3 != nil { - managerServer3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - managerServer3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - managerClient1, err := manager.NewGRPCClient(managerNode1.Metadata.GrpcAddress) - defer func() { - _ = managerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - resClusterInfo, err := managerClient1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expManagerCluster1 := &management.Cluster{ - 
Nodes: map[string]*management.Node{ - managerNodeId1: { - Id: managerNodeId1, - BindAddress: managerBindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress1, - GrpcGatewayAddress: managerGrpcGatewayAddress1, - HttpAddress: managerHttpAddress1, - }, - }, - managerNodeId2: { - Id: managerNodeId2, - BindAddress: managerBindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress2, - GrpcGatewayAddress: managerGrpcGatewayAddress2, - HttpAddress: managerHttpAddress2, - }, - }, - managerNodeId3: { - Id: managerNodeId3, - BindAddress: managerBindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: managerGrpcAddress3, - GrpcGatewayAddress: managerGrpcGatewayAddress3, - HttpAddress: managerHttpAddress3, - }, - }, - }, - } - actManagerCluster1 := resClusterInfo.Cluster - if !reflect.DeepEqual(expManagerCluster1, actManagerCluster1) { - t.Fatalf("expected content to see %v, saw %v", expManagerCluster1, actManagerCluster1) - } - - // - // indexer cluster1 - // - indexerManagerGrpcAddress1 := managerGrpcAddress1 - indexerShardId1 := "shard1" - indexerPeerGrpcAddress1 := "" - indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId1 := "indexer1" - indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir1) - }() - indexerRaftStorageType1 := "boltdb" - - indexerNode1 := &index.Node{ - Id: indexerNodeId1, - BindAddress: indexerBindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - GrpcGatewayAddress: indexerGrpcGatewayAddress1, - HttpAddress: indexerHttpAddress1, - }, - } - indexerIndexMapping1, err := 
indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType1 := "upside_down" - indexerIndexStorageType1 := "boltdb" - indexerServer1, err := indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexerIndexMapping1, indexerIndexType1, indexerIndexStorageType1, logger.Named(indexerNodeId1), grpcLogger.Named(indexerNodeId1), httpAccessLogger) - defer func() { - indexerServer1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer1.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress2 := managerGrpcAddress1 - indexerShardId2 := "shard1" - indexerPeerGrpcAddress2 := "" - indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId2 := "indexer2" - indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir2) - }() - indexerRaftStorageType2 := "boltdb" - - indexerNode2 := &index.Node{ - Id: indexerNodeId2, - BindAddress: indexerBindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - GrpcGatewayAddress: indexerGrpcGatewayAddress2, - HttpAddress: indexerHttpAddress2, - }, - } - indexerIndexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType2 := "upside_down" - indexerIndexStorageType2 := "boltdb" - indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexerIndexMapping2, indexerIndexType2, 
indexerIndexStorageType2, logger.Named(indexerNodeId2), grpcLogger.Named(indexerNodeId2), httpAccessLogger) - defer func() { - indexerServer2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer2.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress3 := managerGrpcAddress1 - indexerShardId3 := "shard1" - indexerPeerGrpcAddress3 := "" - indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId3 := "indexer3" - indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir3) - }() - indexerRaftStorageType3 := "boltdb" - - indexerNode3 := &index.Node{ - Id: indexerNodeId3, - BindAddress: indexerBindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - GrpcGatewayAddress: indexerGrpcGatewayAddress3, - HttpAddress: indexerHttpAddress3, - }, - } - indexerIndexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType3 := "upside_down" - indexerIndexStorageType3 := "boltdb" - indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexerIndexMapping3, indexerIndexType3, indexerIndexStorageType3, logger.Named(indexerNodeId3), grpcLogger.Named(indexerNodeId3), httpAccessLogger) - defer func() { - indexerServer3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for manager1 - indexerClient1, err := indexer.NewGRPCClient(indexerNode1.Metadata.GrpcAddress) - defer func() { - _ = indexerClient1.Close() - }() - if err != 
nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - resClusterInfoIndexer1, err := indexerClient1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster1 := &index.Cluster{ - Nodes: map[string]*index.Node{ - indexerNodeId1: { - Id: indexerNodeId1, - BindAddress: indexerBindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress1, - GrpcGatewayAddress: indexerGrpcGatewayAddress1, - HttpAddress: indexerHttpAddress1, - }, - }, - indexerNodeId2: { - Id: indexerNodeId2, - BindAddress: indexerBindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress2, - GrpcGatewayAddress: indexerGrpcGatewayAddress2, - HttpAddress: indexerHttpAddress2, - }, - }, - indexerNodeId3: { - Id: indexerNodeId3, - BindAddress: indexerBindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress3, - GrpcGatewayAddress: indexerGrpcGatewayAddress3, - HttpAddress: indexerHttpAddress3, - }, - }, - }, - } - actIndexerCluster1 := resClusterInfoIndexer1.Cluster - if !cmp.Equal(expIndexerCluster1, actIndexerCluster1) { - t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) - } - - // - // indexer cluster2 - // - indexerManagerGrpcAddress4 := managerGrpcAddress1 - indexerShardId4 := "shard2" - indexerPeerGrpcAddress4 := "" - indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId4 := "indexer4" - indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir4 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir4) - }() - indexerRaftStorageType4 := "boltdb" - - indexerNode4 := &index.Node{ - Id: indexerNodeId4, - BindAddress: indexerBindAddress4, - State: index.Node_UNKNOWN, - Metadata: 
&index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - GrpcGatewayAddress: indexerGrpcGatewayAddress4, - HttpAddress: indexerHttpAddress4, - }, - } - indexerIndexMapping4, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType4 := "upside_down" - indexerIndexStorageType4 := "boltdb" - indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexerIndexMapping4, indexerIndexType4, indexerIndexStorageType4, logger.Named(indexerNodeId4), grpcLogger.Named(indexerNodeId4), httpAccessLogger) - defer func() { - indexerServer4.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer4.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress5 := managerGrpcAddress1 - indexerShardId5 := "shard2" - indexerPeerGrpcAddress5 := "" - indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId5 := "indexer5" - indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir5 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir5) - }() - indexerRaftStorageType5 := "boltdb" - - indexerNode5 := &index.Node{ - Id: indexerNodeId5, - BindAddress: indexerBindAddress5, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - GrpcGatewayAddress: indexerGrpcGatewayAddress5, - HttpAddress: indexerHttpAddress5, - }, - } - indexerIndexMapping5, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType5 := "upside_down" - indexerIndexStorageType5 := "boltdb" - indexerServer5, err := 
indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexerIndexMapping5, indexerIndexType5, indexerIndexStorageType5, logger.Named(indexerNodeId5), grpcLogger.Named(indexerNodeId5), httpAccessLogger) - defer func() { - indexerServer5.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer5.Start() - - // sleep - time.Sleep(5 * time.Second) - - indexerManagerGrpcAddress6 := managerGrpcAddress1 - indexerShardId6 := "shard2" - indexerPeerGrpcAddress6 := "" - indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerGrpcGatewayAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerNodeId6 := "indexer6" - indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) - indexerDataDir6 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(indexerDataDir6) - }() - indexerRaftStorageType6 := "boltdb" - - indexerNode6 := &index.Node{ - Id: indexerNodeId6, - BindAddress: indexerBindAddress6, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - GrpcGatewayAddress: indexerGrpcGatewayAddress6, - HttpAddress: indexerHttpAddress6, - }, - } - indexerIndexMapping6, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexerIndexType6 := "upside_down" - indexerIndexStorageType6 := "boltdb" - indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexerIndexMapping6, indexerIndexType6, indexerIndexStorageType6, logger.Named(indexerNodeId6), grpcLogger.Named(indexerNodeId6), httpAccessLogger) - defer func() { - indexerServer6.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - indexerServer6.Start() - - // sleep - time.Sleep(5 * 
time.Second) - - // gRPC client for manager1 - indexerClient2, err := indexer.NewGRPCClient(indexerNode4.Metadata.GrpcAddress) - defer func() { - _ = indexerClient1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - // get cluster info from manager1 - indexerCluster2, err := indexerClient2.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expIndexerCluster2 := &index.Cluster{ - Nodes: map[string]*index.Node{ - indexerNodeId4: { - Id: indexerNodeId4, - BindAddress: indexerBindAddress4, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress4, - GrpcGatewayAddress: indexerGrpcGatewayAddress4, - HttpAddress: indexerHttpAddress4, - }, - }, - indexerNodeId5: { - Id: indexerNodeId5, - BindAddress: indexerBindAddress5, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress5, - GrpcGatewayAddress: indexerGrpcGatewayAddress5, - HttpAddress: indexerHttpAddress5, - }, - }, - indexerNodeId6: { - Id: indexerNodeId6, - BindAddress: indexerBindAddress6, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: indexerGrpcAddress6, - GrpcGatewayAddress: indexerGrpcGatewayAddress6, - HttpAddress: indexerHttpAddress6, - }, - }, - }, - } - actIndexerCluster2 := indexerCluster2.Cluster - if !reflect.DeepEqual(expIndexerCluster2, actIndexerCluster2) { - t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) - } - - // - // dispatcher - // - dispatcherManagerGrpcAddress := managerGrpcAddress1 - dispatcherGrpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcherGrpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dispatcherHttpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - - dispatcher1, err := NewServer(dispatcherManagerGrpcAddress, dispatcherGrpcAddress, dispatcherGrpcGatewayAddress, dispatcherHttpAddress, logger.Named("dispatcher1"), grpcLogger.Named("dispatcher1"), httpAccessLogger) - defer func() { - 
dispatcher1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - dispatcher1.Start() - - // sleep - time.Sleep(5 * time.Second) -} diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 4d358be..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,221 +0,0 @@ -version: '3.4' - -networks: - blast-cluster: - driver: bridge - -services: - manager1: - container_name: manager1 - image: mosuka/blast:latest - restart: always - ports: - - 2110:2110 - - 5110:5110 - - 6110:6110 - - 8110:8110 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - command: > - blast manager start - --node-id=blast-manager1 - --node-address=manager1:2110 - --grpc-address=manager1:5110 - --grpc-gateway-address=manager1:6110 - --http-address=manager1:8110 - --data-dir=/tmp/blast/manager1 - --raft-storage-type=boltdb - --index-mapping-file=/opt/blast/example/wiki_index_mapping.json - --index-type=scorch - --index-storage-type=scorch - - indexer1: - container_name: indexer1 - image: mosuka/blast:latest - restart: always - ports: - - 2010:2010 - - 5010:5010 - - 6010:6010 - - 8010:8010 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer1 - --node-address=indexer1:2010 - --grpc-address=indexer1:5010 - --grpc-gateway-address=indexer1:6010 - --http-address=indexer1:8010 - --data-dir=/tmp/blast/indexer1 - --raft-storage-type=boltdb - - indexer2: - container_name: indexer2 - image: mosuka/blast:latest - restart: always - ports: - - 2020:2020 - - 5020:5020 - - 6020:6020 - - 8020:8020 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer2 - --node-address=indexer2:2020 - --grpc-address=indexer2:5020 - 
--grpc-gateway-address=indexer2:6020 - --http-address=indexer2:8020 - --data-dir=/tmp/blast/indexer2 - --raft-storage-type=boltdb - - indexer3: - container_name: indexer3 - image: mosuka/blast:latest - restart: always - ports: - - 2030:2030 - - 5030:5030 - - 6030:6030 - - 8030:8030 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard1 - --node-id=blast-indexer3 - --node-address=indexer3:2030 - --grpc-address=indexer3:5030 - --grpc-gateway-address=indexer3:6030 - --http-address=indexer3:8030 - --data-dir=/tmp/blast/indexer3 - --raft-storage-type=boltdb - - indexer4: - container_name: indexer4 - image: mosuka/blast:latest - restart: always - ports: - - 2040:2040 - - 5040:5040 - - 6040:6040 - - 8040:8040 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer4 - --node-address=indexer4:2040 - --grpc-address=indexer4:5040 - --grpc-gateway-address=indexer4:6040 - --http-address=indexer4:8040 - --data-dir=/tmp/blast/indexer4 - --raft-storage-type=boltdb - - indexer5: - container_name: indexer5 - image: mosuka/blast:latest - restart: always - ports: - - 2050:2050 - - 5050:5050 - - 6050:6050 - - 8050:8050 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer5 - --node-address=indexer5:2050 - --grpc-address=indexer5:5050 - --grpc-gateway-address=indexer5:6050 - --http-address=indexer5:8050 - --data-dir=/tmp/blast/indexer5 - --raft-storage-type=boltdb - - indexer6: - container_name: indexer6 - image: mosuka/blast:latest - restart: always - ports: - - 2060:2060 - - 5060:5060 - - 6060:6060 - - 8060:8060 - 
networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - command: > - blast indexer start - --manager-grpc-address=manager1:5110 - --shard-id=shard2 - --node-id=blast-indexer6 - --node-address=indexer6:2060 - --grpc-address=indexer6:5060 - --grpc-gateway-address=indexer6:6060 - --http-address=indexer6:8060 - --data-dir=/tmp/blast/indexer6 - --raft-storage-type=boltdb - - dispatcher1: - container_name: dispatcher1 - image: mosuka/blast:latest - restart: always - ports: - - 5210:5210 - - 6210:6210 - - 8210:8210 - networks: - - blast-cluster - volumes: - - ./example:/opt/blast/example - depends_on: - - manager1 - - indexer1 - - indexer2 - - indexer3 - - indexer4 - - indexer5 - - indexer6 - command: > - blast dispatcher start - --manager-grpc-address=manager1:5110 - --grpc-address=dispatcher1:5210 - --grpc-gateway-address=dispatcher1:6210 - --http-address=dispatcher1:8210 diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100755 index 1cf687a..0000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2019 Minoru Osuka -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e - -exec "$@" diff --git a/errors/errors.go b/errors/errors.go index cc538f6..fcdf16f 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -1,23 +1,15 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package errors import "errors" var ( - ErrNotFoundLeader = errors.New("does not found leader") - ErrNotFound = errors.New("not found") - ErrTimeout = errors.New("timeout") + ErrNotFoundLeader = errors.New("does not found leader") + ErrNodeAlreadyExists = errors.New("node already exists") + ErrNodeDoesNotExist = errors.New("node does not exist") + ErrNodeNotReady = errors.New("node not ready") + ErrNotFound = errors.New("not found") + ErrTimeout = errors.New("timeout") + ErrNoUpdate = errors.New("no update") + ErrNil = errors.New("data is nil") + ErrUnsupportedEvent = errors.New("unsupported event") ) diff --git a/etc/blast.yaml b/etc/blast.yaml new file mode 100644 index 0000000..ab362c1 --- /dev/null +++ b/etc/blast.yaml @@ -0,0 +1,16 @@ +id: "node1" +raft_address: ":7000" +grpc_address: ":9000" +http_address: ":8000" +data_directory: "/tmp/blast/node1/data" +#mapping_file: "./etc/blast_mapping.json" +peer_grpc_address: "" +#certificate_file: "./etc/blast-cert.pem" +#key_file: "./etc/blast-key.pem" +#common_name: "localhost" +log_level: "INFO" +log_file: "" +#log_max_size: 500 +#log_max_backups: 3 +#log_max_age: 30 +#log_compress: false diff --git a/example/enwiki_index_mapping.json b/etc/blast_mapping.json 
similarity index 97% rename from example/enwiki_index_mapping.json rename to etc/blast_mapping.json index 2ef6200..118348c 100644 --- a/example/enwiki_index_mapping.json +++ b/etc/blast_mapping.json @@ -1,10 +1,10 @@ { "types": { - "enwiki": { + "example": { "enabled": true, "dynamic": true, "properties": { - "title_en": { + "title": { "enabled": true, "dynamic": true, "fields": [ @@ -19,7 +19,7 @@ ], "default_analyzer": "en" }, - "text_en": { + "text": { "enabled": true, "dynamic": true, "fields": [ diff --git a/example/geo_doc_2.json b/example/geo_doc_2.json deleted file mode 100644 index 0ca3e13..0000000 --- a/example/geo_doc_2.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "2", - "fields": { - "name": "Capital City Brewing Company", - "city": "Washington", - "state": "District of Columbia", - "code": "20005", - "country": "United States", - "phone": "202.628.2222", - "website": "http://www.capcitybrew.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. 
Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.", - "address": [ - "1100 New York Ave, NW" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.8999, - "lon": -77.0272 - } - } -} diff --git a/example/geo_doc_3.json b/example/geo_doc_3.json deleted file mode 100644 index 98c79c5..0000000 --- a/example/geo_doc_3.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "3", - "fields": { - "name": "Firehouse Grill & Brewery", - "city": "Sunnyvale", - "state": "California", - "code": "94086", - "country": "United States", - "phone": "1-408-773-9500", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "111 South Murphy Avenue" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 37.3775, - "lon": -122.03 - } - } -} diff --git a/example/geo_doc_4.json b/example/geo_doc_4.json deleted file mode 100644 index fcdc08a..0000000 --- a/example/geo_doc_4.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "4", - "fields": { - "name": "Hook & Ladder Brewing Company", - "city": "Silver Spring", - "state": "Maryland", - "code": "20910", - "country": "United States", - "phone": "301.565.4522", - "website": "http://www.hookandladderbeer.com", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.", - "address": [ - "8113 Fenton St." 
- ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 38.9911, - "lon": -77.0237 - } - } -} diff --git a/example/geo_doc_5.json b/example/geo_doc_5.json deleted file mode 100644 index e2e6807..0000000 --- a/example/geo_doc_5.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "5", - "fields": { - "name": "Jack's Brewing", - "city": "Fremont", - "state": "California", - "code": "94538", - "country": "United States", - "phone": "1-510-796-2036", - "website": "", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "39176 Argonaut Way" - ], - "geo": { - "accuracy": "ROOFTOP", - "lat": 37.5441, - "lon": -121.988 - } - } -} diff --git a/example/geo_doc_6.json b/example/geo_doc_6.json deleted file mode 100644 index 8ecc9bb..0000000 --- a/example/geo_doc_6.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "id": "6", - "fields": { - "name": "Sweet Water Tavern and Brewery", - "city": "Sterling", - "state": "Virginia", - "code": "20121", - "country": "United States", - "phone": "(703) 449-1108", - "website": "http://www.greatamericanrestaurants.com/sweetMainSter/index.htm", - "type": "brewery", - "updated": "2010-07-22 20:00:20", - "description": "", - "address": [ - "45980 Waterview Plaza" - ], - "geo": { - "accuracy": "RANGE_INTERPOLATED", - "lat": 39.0324, - "lon": -77.4097 - } - } -} diff --git a/example/wiki_bulk_delete.txt b/example/wiki_bulk_delete.txt deleted file mode 100644 index 8928994..0000000 --- a/example/wiki_bulk_delete.txt +++ /dev/null @@ -1,36 +0,0 @@ -arwiki_1 -bgwiki_1 -cawiki_1 -cswiki_1 -dawiki_1 -dewiki_1 -elwiki_1 -enwiki_1 -eswiki_1 -fawiki_1 -fiwiki_1 -frwiki_1 -gawiki_1 -glwiki_1 -guwiki_1 -hiwiki_1 -huwiki_1 -hywiki_1 -idwiki_1 -itwiki_1 -jawiki_1 -knwiki_1 -kowiki_1 -mlwiki_1 -nlwiki_1 -nowiki_1 -pswiki_1 -ptwiki_1 -rowiki_1 -ruwiki_1 -svwiki_1 -tawiki_1 -tewiki_1 -thwiki_1 -trwiki_1 -zhwiki_1 diff --git a/example/wiki_bulk_index.jsonl b/example/wiki_bulk_index.jsonl deleted file mode 100644 index 32a0fbd..0000000 --- 
a/example/wiki_bulk_index.jsonl +++ /dev/null @@ -1,36 +0,0 @@ -{"id":"arwiki_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"arwiki"}} -{"id":"bgwiki_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bgwiki"}} -{"id":"cawiki_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. 
L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"cawiki"}} -{"id":"cswiki_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cswiki"}} -{"id":"dawiki_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"dawiki"}} -{"id":"dewiki_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"dewiki"}} -{"id":"elwiki_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"elwiki"}} -{"id":"enwiki_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. 
Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"enwiki"}} -{"id":"eswiki_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"eswiki"}} -{"id":"fawiki_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! 
جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fawiki"}} -{"id":"fiwiki_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fiwiki"}} -{"id":"frwiki_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. 
Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"frwiki"}} -{"id":"gawiki_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"gawiki"}} -{"id":"glwiki_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. 
As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"glwiki"}} -{"id":"guwiki_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"guwiki"}} -{"id":"hiwiki_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hiwiki"}} -{"id":"huwiki_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. 
Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"huwiki"}} -{"id":"hywiki_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, 
սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hywiki"}} -{"id":"idwiki_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"idwiki"}} -{"id":"itwiki_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"itwiki"}} -{"id":"jawiki_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"jawiki"}} -{"id":"knwiki_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"knwiki"}} -{"id":"kowiki_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"kowiki"}} -{"id":"mlwiki_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"mlwiki"}} -{"id":"nlwiki_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nlwiki"}} -{"id":"nowiki_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"nowiki"}} -{"id":"pswiki_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"pswiki"}} -{"id":"ptwiki_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"ptwiki"}} -{"id":"rowiki_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"rowiki"}} -{"id":"ruwiki_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ruwiki"}} -{"id":"svwiki_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"svwiki"}} -{"id":"tawiki_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"tawiki"}} -{"id":"tewiki_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"tewiki"}} -{"id":"thwiki_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"thwiki"}} -{"id":"trwiki_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"trwiki"}} -{"id":"zhwiki_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zhwiki"}} diff --git a/example/wiki_doc_arwiki_1.json b/example/wiki_doc_arwiki_1.json deleted file mode 100644 index fdbdac0..0000000 --- a/example/wiki_doc_arwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "arwiki_1", - "fields": { - "title_ar": "محرك بحث", - "text_ar": "محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).", - "timestamp": "2018-03-25T18:04:00Z", - "_type": "arwiki" - } -} diff --git a/example/wiki_doc_bgwiki_1.json b/example/wiki_doc_bgwiki_1.json deleted file mode 100644 index 3ad2735..0000000 --- a/example/wiki_doc_bgwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "bgwiki_1", - "fields": { - "title_bg": "Търсачка", - "text_bg": "Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). 
В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.", - "timestamp": "2018-07-11T11:03:00Z", - "_type": "bgwiki" - } -} diff --git a/example/wiki_doc_cawiki_1.json b/example/wiki_doc_cawiki_1.json deleted file mode 100644 index ffb67e6..0000000 --- a/example/wiki_doc_cawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "cawiki_1", - "fields": { - "title_ca": "Motor de cerca", - "text_ca": "Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. 
Els directoris, en canvi, són gestionats per editors humans.", - "timestamp": "2018-07-09T18:07:00Z", - "_type": "cawiki" - } -} diff --git a/example/wiki_doc_cswiki_1.json b/example/wiki_doc_cswiki_1.json deleted file mode 100644 index 89c994a..0000000 --- a/example/wiki_doc_cswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "cswiki_1", - "fields": { - "title_cs": "Vyhledávač", - "text_cs": "Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.", - "timestamp": "2017-11-10T21:59:00Z", - "_type": "cswiki" - } -} diff --git a/example/wiki_doc_dawiki_1.json b/example/wiki_doc_dawiki_1.json deleted file mode 100644 index ff1ee22..0000000 --- a/example/wiki_doc_dawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "dawiki_1", - "fields": { - "title_da": "Søgemaskine", - "text_da": "En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). 
Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dawiki" - } -} diff --git a/example/wiki_doc_dewiki_1.json b/example/wiki_doc_dewiki_1.json deleted file mode 100644 index c5f0a83..0000000 --- a/example/wiki_doc_dewiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "dewiki_1", - "fields": { - "title_de": "Suchmaschine", - "text_de": "Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.", - "timestamp": "2017-09-04T01:54:00Z", - "_type": "dewiki" - } -} diff --git a/example/wiki_doc_elwiki_1.json b/example/wiki_doc_elwiki_1.json deleted file mode 100644 index 42f143b..0000000 --- a/example/wiki_doc_elwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "elwiki_1", - "fields": { - "title_el": "Μηχανή αναζήτησης", - "text_el": "Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. 
Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.", - "timestamp": "2017-11-21T19:57:00Z", - "_type": "elwiki" - } -} diff --git a/example/wiki_doc_enwiki_1.json b/example/wiki_doc_enwiki_1.json deleted file mode 100644 index bcb7d18..0000000 --- a/example/wiki_doc_enwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "enwiki_1", - "fields": { - "title_en": "Search engine (computing)", - "text_en": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", - "timestamp": "2018-07-04T05:41:00Z", - "_type": "enwiki" - } -} diff --git a/example/wiki_doc_eswiki_1.json b/example/wiki_doc_eswiki_1.json deleted file mode 100644 index 5d3c7aa..0000000 --- a/example/wiki_doc_eswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "eswiki_1", - "fields": { - "title_es": "Motor de búsqueda", - "text_es": "Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. 
Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.", - "timestamp": "2018-08-30T11:30:00Z", - "_type": "eswiki" - } -} diff --git a/example/wiki_doc_fawiki_1.json b/example/wiki_doc_fawiki_1.json deleted file mode 100644 index 093cc83..0000000 --- a/example/wiki_doc_fawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "fawiki_1", - "fields": { - "title_fa": "موتور جستجو (پردازش)", - "text_fa": "موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.", - "timestamp": "2017-01-06T02:46:00Z", - "_type": "fawiki" - } -} diff --git a/example/wiki_doc_fiwiki_1.json b/example/wiki_doc_fiwiki_1.json deleted file mode 100644 index e930816..0000000 --- a/example/wiki_doc_fiwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "fiwiki_1", - "fields": { - "title_fi": "Hakukone", - "text_fi": "Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. 
Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.", - "timestamp": "2017-10-04T14:33:00Z", - "_type": "fiwiki" - } -} diff --git a/example/wiki_doc_frwiki_1.json b/example/wiki_doc_frwiki_1.json deleted file mode 100644 index 4090cd0..0000000 --- a/example/wiki_doc_frwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "frwiki_1", - "fields": { - "title_fr": "Moteur de recherche", - "text_fr": "Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. 
Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.", - "timestamp": "2018-05-30T15:15:00Z", - "_type": "frwiki" - } -} diff --git a/example/wiki_doc_gawiki_1.json b/example/wiki_doc_gawiki_1.json deleted file mode 100644 index ad69390..0000000 --- a/example/wiki_doc_gawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "gawiki_1", - "fields": { - "title_ga": "Inneall cuardaigh", - "text_ga": "Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. 
Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.", - "timestamp": "2013-10-27T18:17:00Z", - "_type": "gawiki" - } -} diff --git a/example/wiki_doc_glwiki_1.json b/example/wiki_doc_glwiki_1.json deleted file mode 100644 index 667e187..0000000 --- a/example/wiki_doc_glwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "glwiki_1", - "fields": { - "title_gl": "Motor de busca", - "text_gl": "Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.", - "timestamp": "2016-10-31T13:33:00Z", - "_type": "glwiki" - } -} diff --git a/example/wiki_doc_guwiki_1.json b/example/wiki_doc_guwiki_1.json deleted file mode 100644 index a0afc9b..0000000 --- a/example/wiki_doc_guwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "guwiki_1", - "fields": { - "title_gu": "વેબ શોધ એન્જીન", - "text_gu": "વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. 
વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.", - "timestamp": "2013-04-04T19:28:00Z", - "_type": "guwiki" - } -} diff --git a/example/wiki_doc_hiwiki_1.json b/example/wiki_doc_hiwiki_1.json deleted file mode 100644 index 494a176..0000000 --- a/example/wiki_doc_hiwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "hiwiki_1", - "fields": { - "title_hi": "खोज इंजन", - "text_hi": "ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।", - "timestamp": "2017-10-19T20:09:00Z", - "_type": "hiwiki" - } -} diff --git a/example/wiki_doc_huwiki_1.json b/example/wiki_doc_huwiki_1.json deleted file mode 100644 index 95f97a0..0000000 --- a/example/wiki_doc_huwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "huwiki_1", - "fields": { - "title_hu": "Keresőmotor", - "text_hu": "A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.", - "timestamp": "2018-05-15T20:40:00Z", - "_type": "huwiki" - } -} diff --git a/example/wiki_doc_hywiki_1.json b/example/wiki_doc_hywiki_1.json deleted file mode 100644 index 0e36b1a..0000000 --- a/example/wiki_doc_hywiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "hywiki_1", - "fields": { - "title_hy": "Որոնողական համակարգ", - "text_hy": "Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են 
ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "hywiki" - } -} diff --git a/example/wiki_doc_idwiki_1.json b/example/wiki_doc_idwiki_1.json deleted file mode 100644 index 16e5802..0000000 --- a/example/wiki_doc_idwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "idwiki_1", - "fields": { - "title_id": "Mesin pencari web", - "text_id": "Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. 
Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.", - "timestamp": "2017-11-20T17:47:00Z", - "_type": "idwiki" - } -} diff --git a/example/wiki_doc_itwiki_1.json b/example/wiki_doc_itwiki_1.json deleted file mode 100644 index b8bdd5d..0000000 --- a/example/wiki_doc_itwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "itwiki_1", - "fields": { - "title_it": "Motore di ricerca", - "text_it": "Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. 
I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.", - "timestamp": "2018-07-16T12:20:00Z", - "_type": "itwiki" - } -} diff --git a/example/wiki_doc_jawiki_1.json b/example/wiki_doc_jawiki_1.json deleted file mode 100644 index 264ff02..0000000 --- a/example/wiki_doc_jawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "jawiki_1", - "fields": { - "title_ja": "検索エンジン", - "text_ja": "検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。", - "timestamp": "2018-05-30T00:52:00Z", - "_type": "jawiki" - } -} diff --git a/example/wiki_doc_knwiki_1.json b/example/wiki_doc_knwiki_1.json deleted file mode 100644 index a24e9cc..0000000 --- a/example/wiki_doc_knwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "knwiki_1", - "fields": { - "title_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ", - "text_kn": "ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. 
ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.", - "timestamp": "2017-10-03T14:13:00Z", - "_type": "knwiki" - } -} diff --git a/example/wiki_doc_kowiki_1.json b/example/wiki_doc_kowiki_1.json deleted file mode 100644 index 3a612fe..0000000 --- a/example/wiki_doc_kowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "kowiki_1", - "fields": { - "title_cjk": "검색 엔진", - "text_cjk": "검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.", - "timestamp": "2017-11-19T12:50:00Z", - "_type": "kowiki" - } -} diff --git a/example/wiki_doc_mlwiki_1.json b/example/wiki_doc_mlwiki_1.json deleted file mode 100644 index 09c633b..0000000 --- a/example/wiki_doc_mlwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "mlwiki_1", - "fields": { - "title_ml": "വെബ് സെർച്ച് എഞ്ചിൻ", - "text_ml": "വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.", - "timestamp": "2010-05-05T15:06:00Z", - "_type": "mlwiki" - } -} diff --git a/example/wiki_doc_nlwiki_1.json b/example/wiki_doc_nlwiki_1.json deleted file mode 100644 index 0b2a52f..0000000 --- a/example/wiki_doc_nlwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "nlwiki_1", - "fields": { - "title_nl": "Zoekmachine", - "text_nl": "Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. 
Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.", - "timestamp": "2018-05-07T11:05:00Z", - "_type": "nlwiki" - } -} diff --git a/example/wiki_doc_nowiki_1.json b/example/wiki_doc_nowiki_1.json deleted file mode 100644 index 39d5a35..0000000 --- a/example/wiki_doc_nowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "nowiki_1", - "fields": { - "title_no": "Søkemotor", - "text_no": "En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). 
En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.", - "timestamp": "2018-02-05T14:15:00Z", - "_type": "nowiki" - } -} diff --git a/example/wiki_doc_pswiki_1.json b/example/wiki_doc_pswiki_1.json deleted file mode 100644 index 645fa9e..0000000 --- a/example/wiki_doc_pswiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "pswiki_1", - "fields": { - "title_ps": "انټرنټ لټوونکی ماشين", - "text_ps": "نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. 
نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.", - "timestamp": "2015-12-15T18:53:00Z", - "_type": "pswiki" - } -} diff --git a/example/wiki_doc_ptwiki_1.json b/example/wiki_doc_ptwiki_1.json deleted file mode 100644 index b79cbb6..0000000 --- a/example/wiki_doc_ptwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "ptwiki_1", - "fields": { - "title_pt": "Motor de busca", - "text_pt": "Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. 
Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).", - "timestamp": "2017-11-09T14:38:00Z", - "_type": "ptwiki" - } -} diff --git a/example/wiki_doc_rowiki_1.json b/example/wiki_doc_rowiki_1.json deleted file mode 100644 index 7562616..0000000 --- a/example/wiki_doc_rowiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "rowiki_1", - "fields": { - "title_ro": "Motor de căutare", - "text_ro": "Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. 
În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).", - "timestamp": "2018-06-12T08:59:00Z", - "_type": "rowiki" - } -} diff --git a/example/wiki_doc_ruwiki_1.json b/example/wiki_doc_ruwiki_1.json deleted file mode 100644 index 818b84f..0000000 --- a/example/wiki_doc_ruwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "ruwiki_1", - "fields": { - "title_ru": "Поисковая машина", - "text_ru": "Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.", - "timestamp": "2017-03-22T01:16:00Z", - "_type": "ruwiki" - } -} diff --git a/example/wiki_doc_svwiki_1.json b/example/wiki_doc_svwiki_1.json deleted file mode 100644 index 4c9210e..0000000 --- a/example/wiki_doc_svwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "svwiki_1", - "fields": { - "title_sv": "Söktjänst", - "text_sv": "En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.", - "timestamp": "2018-08-16T22:13:00Z", - "_type": "svwiki" - } -} diff --git a/example/wiki_doc_tawiki_1.json b/example/wiki_doc_tawiki_1.json deleted file mode 100644 index 1b7e1aa..0000000 --- a/example/wiki_doc_tawiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "tawiki_1", - "fields": { - "title_ta": "தேடுபொறி", - "text_ta": "தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். 
இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.", - "timestamp": "2017-12-24T10:30:00Z", - "_type": "tawiki" - } -} diff --git a/example/wiki_doc_tewiki_1.json b/example/wiki_doc_tewiki_1.json deleted file mode 100644 index 2cb70b5..0000000 --- a/example/wiki_doc_tewiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "tewiki_1", - "fields": { - "title_te": "వెబ్ శోధనా యంత్రం", - "text_te": "వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.", - "timestamp": "2017-06-19T11:22:00Z", - "_type": "tewiki" - } -} diff --git a/example/wiki_doc_thwiki_1.json b/example/wiki_doc_thwiki_1.json deleted file mode 100644 index 9379367..0000000 --- a/example/wiki_doc_thwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "thwiki_1", - "fields": { - "title_th": "เสิร์ชเอนจิน", - "text_th": "เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป", - "timestamp": "2016-06-18T11:06:00Z", - "_type": "thwiki" - } -} diff --git a/example/wiki_doc_trwiki_1.json b/example/wiki_doc_trwiki_1.json deleted file mode 100644 index 14dace8..0000000 --- a/example/wiki_doc_trwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "trwiki_1", - "fields": { - "title_tr": "Arama motoru", - "text_tr": "Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.", - "timestamp": "2018-03-13T17:37:00Z", - "_type": "trwiki" - } -} diff --git a/example/wiki_doc_zhwiki_1.json b/example/wiki_doc_zhwiki_1.json deleted file mode 100644 index 98f1376..0000000 --- a/example/wiki_doc_zhwiki_1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "zhwiki_1", - "fields": { - "title_zh": "搜索引擎", - "text_zh": "搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.", - "timestamp": "2018-08-27T05:47:00Z", - "_type": "zhwiki" - } -} diff --git a/examples/example_bulk_delete.txt b/examples/example_bulk_delete.txt new file mode 100644 index 0000000..3bb459b --- /dev/null +++ b/examples/example_bulk_delete.txt @@ -0,0 +1,11 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 diff --git a/examples/example_bulk_index.json b/examples/example_bulk_index.json new file mode 100644 index 0000000..fab37a7 --- /dev/null +++ b/examples/example_bulk_index.json @@ -0,0 +1,11 @@ +{"id": "1","fields": {"title": "Blast", "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "2","fields": {"title": "Bleve", "text": "Bleve is a modern text indexing library for go.", "timestamp": "2019-10-30T16:13:00Z", "_type": "example"}} +{"id": "3","fields": {"title": "Riot", "text": "Riot is Go Open Source, Distributed, Simple and efficient full text search engine.", "timestamp": "2019-12-16T07:12:00Z", "_type": "example"}} +{"id": "4","fields": {"title": "Bayard", "text": "Bayard is a full text search and indexing server, written in Rust, built on top of Tantivy.", "timestamp": "2019-12-19T10:41:00Z", "_type": "example"}} +{"id": "5","fields": {"title": "Toshi", "text": "Toshi is meant to be a full-text search engine similar to Elasticsearch. 
Toshi strives to be to Elasticsearch what Tantivy is to Lucene.", "timestamp": "2019-12-02T04:00:00Z", "_type": "example"}} +{"id": "6","fields": {"title": "Tantivy", "text": "Tantivy is a full-text search engine library inspired by Apache Lucene and written in Rust.", "timestamp": "2019-12-19T10:07:00Z", "_type": "example"}} +{"id": "7","fields": {"title": "Sonic", "text": "Sonic is a fast, lightweight and schema-less search backend.", "timestamp": "2019-12-10T23:13:00Z", "_type": "example"}} +{"id": "8","fields": {"title": "Apache Solr", "text": "Solr is highly reliable, scalable and fault tolerant, providing distributed indexing, replication and load-balanced querying, automated failover and recovery, centralized configuration and more.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "9","fields": {"title": "Elasticsearch", "text": "Elasticsearch is a distributed, open source search and analytics engine for all types of data, including textual, numerical, geospatial, structured, and unstructured.", "timestamp": "2019-12-19T08:19:00Z", "_type": "example"}} +{"id": "10","fields": {"title": "Lucene", "text": "Apache Lucene is a high-performance, full-featured text search engine library written entirely in Java.", "timestamp": "2019-12-19T14:08:00Z", "_type": "example"}} +{"id": "11","fields": {"title": "Whoosh", "text": "Whoosh is a fast, pure Python search engine library.", "timestamp": "2019-10-08T05:30:26Z", "_type": "example"}} diff --git a/examples/example_doc_1.json b/examples/example_doc_1.json new file mode 100644 index 0000000..09f6cad --- /dev/null +++ b/examples/example_doc_1.json @@ -0,0 +1,8 @@ +{ + "fields": { + "title": "Blast", + "text": "Blast is a full text search and indexing server, written in Go, built on top of Bleve.", + "timestamp": "2019-12-16T07:12:00Z", + "_type": "example" + } +} diff --git a/examples/example_mapping.json b/examples/example_mapping.json new file mode 100644 index 0000000..118348c --- /dev/null +++ 
b/examples/example_mapping.json @@ -0,0 +1,103 @@ +{ + "types": { + "example": { + "enabled": true, + "dynamic": true, + "properties": { + "title": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "text": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "en", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "en" + }, + "url": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + }, + "timestamp": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "datetime", + "store": true, + "index": true, + "include_in_all": true + } + ], + "default_analyzer": "" + }, + "_type": { + "enabled": true, + "dynamic": true, + "fields": [ + { + "type": "text", + "analyzer": "keyword", + "store": true, + "index": true, + "include_term_vectors": true, + "include_in_all": true + } + ], + "default_analyzer": "keyword" + } + }, + "default_analyzer": "en" + } + }, + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } +} diff --git a/example/wiki_search_request.json b/examples/example_search_request.json similarity index 100% rename from example/wiki_search_request.json rename to examples/example_search_request.json diff --git 
a/example/wiki_search_request_prefix.json b/examples/example_search_request_prefix.json similarity index 100% rename from example/wiki_search_request_prefix.json rename to examples/example_search_request_prefix.json diff --git a/example/wiki_search_request_simple.json b/examples/example_search_request_simple.json similarity index 100% rename from example/wiki_search_request_simple.json rename to examples/example_search_request_simple.json diff --git a/examples/geo_example_bulk_index.json b/examples/geo_example_bulk_index.json new file mode 100644 index 0000000..fbcbad7 --- /dev/null +++ b/examples/geo_example_bulk_index.json @@ -0,0 +1,6 @@ +{"id":"1","fields":{"name":"Brewpub-on-the-Green","city":"Fremont","state":"California","code":"","country":"United States","phone":"","website":"","updated":"2010-07-22 20:00:20","description":"","address":[],"geo":{"accuracy":"APPROXIMATE","lat":37.5483,"lon":-121.989},"_type":"geo_example"}} +{"id":"2","fields":{"name":"Capital City Brewing Company","city":"Washington","state":"District of Columbia","code":"20005","country":"United States","phone":"202.628.2222","website":"http://www.capcitybrew.com","updated":"2010-07-22 20:00:20","description":"Washington DC's first brewpub since prohibition, Capitol City Brewing Co. opened its doors in 1992. Our first location still stands in Downtown DC, at 11th and H St., NW. 
Our company policy is to bring the fine craft of brewing to every person who lives and visits our region, as well as treating them to a wonderful meal and a great experience.","address":["1100 New York Ave, NW"],"geo":{"accuracy":"ROOFTOP","lat":38.8999,"lon":-77.0272},"_type":"geo_example"}} +{"id":"3","fields":{"name":"Firehouse Grill & Brewery","city":"Sunnyvale","state":"California","code":"94086","country":"United States","phone":"1-408-773-9500","website":"","updated":"2010-07-22 20:00:20","description":"","address":["111 South Murphy Avenue"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":37.3775,"lon":-122.03},"_type":"geo_example"}} +{"id":"4","fields":{"name":"Hook & Ladder Brewing Company","city":"Silver Spring","state":"Maryland","code":"20910","country":"United States","phone":"301.565.4522","website":"http://www.hookandladderbeer.com","updated":"2010-07-22 20:00:20","description":"At Hook & Ladder Brewing we believe in great beer in the company of good friends, so we bring you three great beers for your drinking pleasure (please drink responsibly). Each of our beers is carefully crafted with the finest quality ingredients for a distinctive taste we know you will enjoy. Try one tonight, you just might get hooked. 
Through our own experiences in the fire and rescue service we have chosen the Hook & Ladder as a symbol of pride and honor to pay tribute to the brave men and women who serve and protect our communities.","address":["8113 Fenton St."],"geo":{"accuracy":"ROOFTOP","lat":38.9911,"lon":-77.0237},"_type":"geo_example"}} +{"id":"5","fields":{"name":"Jack's Brewing","city":"Fremont","state":"California","code":"94538","country":"United States","phone":"1-510-796-2036","website":"","updated":"2010-07-22 20:00:20","description":"","address":["39176 Argonaut Way"],"geo":{"accuracy":"ROOFTOP","lat":37.5441,"lon":-121.988},"_type":"geo_example"}} +{"id":"6","fields":{"name":"Sweet Water Tavern and Brewery","city":"Sterling","state":"Virginia","code":"20121","country":"United States","phone":"(703) 449-1108","website":"http://www.greatamericanrestaurants.com/sweetMainSter/index.htm","updated":"2010-07-22 20:00:20","description":"","address":["45980 Waterview Plaza"],"geo":{"accuracy":"RANGE_INTERPOLATED","lat":39.0324,"lon":-77.4097},"_type":"geo_example"}} diff --git a/example/geo_doc_1.json b/examples/geo_example_doc_1.json similarity index 89% rename from example/geo_doc_1.json rename to examples/geo_example_doc_1.json index 9cbc825..c359461 100644 --- a/example/geo_doc_1.json +++ b/examples/geo_example_doc_1.json @@ -1,5 +1,4 @@ { - "id": "1", "fields": { "name": "Brewpub-on-the-Green", "city": "Fremont", @@ -8,7 +7,6 @@ "country": "United States", "phone": "", "website": "", - "type": "brewery", "updated": "2010-07-22 20:00:20", "description": "", "address": [], @@ -16,6 +14,7 @@ "accuracy": "APPROXIMATE", "lat": 37.5483, "lon": -121.989 - } + }, + "_type": "geo_example" } } diff --git a/example/geo_index_mapping.json b/examples/geo_example_mapping.json similarity index 60% rename from example/geo_index_mapping.json rename to examples/geo_example_mapping.json index f067367..ba7769e 100644 --- a/example/geo_index_mapping.json +++ b/examples/geo_example_mapping.json @@ -1,6 
+1,6 @@ { "types": { - "brewery": { + "geo_example": { "properties": { "name": { "fields": [ @@ -32,5 +32,23 @@ } } }, - "default_type": "brewery" + "default_mapping": { + "enabled": true, + "dynamic": true, + "default_analyzer": "standard" + }, + "type_field": "_type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "store_dynamic": true, + "index_dynamic": true, + "analysis": { + "analyzers": {}, + "char_filters": {}, + "tokenizers": {}, + "token_filters": {}, + "token_maps": {} + } } diff --git a/example/geo_search_request.json b/examples/geo_example_search_request.json similarity index 100% rename from example/geo_search_request.json rename to examples/geo_example_search_request.json diff --git a/examples/multiple_type_example_bulk_index.json b/examples/multiple_type_example_bulk_index.json new file mode 100644 index 0000000..b8ab6ff --- /dev/null +++ b/examples/multiple_type_example_bulk_index.json @@ -0,0 +1,36 @@ +{"id":"ar_1","fields":{"title_ar":"محرك بحث","text_ar":"محرك البحث (بالإنجليزية: Search engine) هو نظام لإسترجاع المعلومات صمم للمساعدة على البحث عن المعلومات المخزنة على أي نظام حاسوبي. تعرض نتائج البحث عادة على شكل قائمة لأماكن تواجد المعلومات ومرتبة وفق معايير معينة. تسمح محركات البحث باختصار مدة البحث والتغلب على مشكلة أحجام البيانات المتصاعدة (إغراق معلوماتي).","timestamp":"2018-03-25T18:04:00Z","_type":"ar"}} +{"id":"bg_1","fields":{"title_bg":"Търсачка","text_bg":"Търсачка или търсеща машина (на английски: Web search engine) е специализиран софтуер за извличане на информация, съхранена в компютърна система или мрежа. Това може да е персонален компютър, Интернет, корпоративна мрежа и т.н. Без допълнителни уточнения, най-често под търсачка се разбира уеб(-)търсачка, която търси в Интернет. Други видове търсачки са корпоративните търсачки, които търсят в интранет мрежите, личните търсачки – за индивидуалните компютри и мобилните търсачки. 
В търсачката потребителят (търсещият) прави запитване за съдържание, отговарящо на определен критерий (обикновено такъв, който съдържа определени думи и фрази). В резултат се получават списък от точки, които отговарят, пълно или частично, на този критерий. Търсачките обикновено използват редовно подновявани индекси, за да оперират бързо и ефикасно. Някои търсачки също търсят в информацията, която е на разположение в нюзгрупите и други големи бази данни. За разлика от Уеб директориите, които се поддържат от хора редактори, търсачките оперират алгоритмично. Повечето Интернет търсачки са притежавани от различни корпорации.","timestamp":"2018-07-11T11:03:00Z","_type":"bg"}} +{"id":"ca_1","fields":{"title_ca":"Motor de cerca","text_ca":"Un motor de cerca o de recerca o bé cercador és un programa informàtic dissenyat per ajudar a trobar informació emmagatzemada en un sistema informàtic com ara una xarxa, Internet, un servidor o un ordinador personal. L'objectiu principal és el de trobar altres programes informàtics, pàgines web i documents, entre d'altres. A partir d'una determinada paraula o paraules o una determinada frase l'usuari demana un contingut sota un criteri determinat i retorna una llista de referències que compleixin aquest criteri. El procés es realitza a través de les metadades, vies per comunicar informació que utilitzen els motors per cada cerca. Els índex que utilitzen els cercadors sempre estan actualitzats a través d'un robot web per generar rapidesa i eficàcia en la recerca. Els directoris, en canvi, són gestionats per editors humans.","timestamp":"2018-07-09T18:07:00Z","_type":"ca"}} +{"id":"cs_1","fields":{"title_cs":"Vyhledávač","text_cs":"Vyhledávač je počítačový systém či program, který umožňuje uživateli zadat nějaký libovolný nebo specifikovaný vyhledávaný výraz a získat z velkého objemu dat informace, které jsou v souladu s tímto dotazem. 
Jako vyhledávač se označují i ​​webové stránky, jejichž hlavní funkcí je poskytování takového systému či programu. Jako internetový vyhledávač se označuje buď vyhledávač, na který se přistupuje přes internet, nebo vyhledávač, jehož zdrojem vyhledávání je internet (tj. WWW, Usenet apod.). Jako online vyhledávač se označuje vyhledávač, při jehož výkonu činnosti dochází k výměně dat v rámci nějaké počítačové sítě, nejčastěji to je internetový vyhledávač. Fulltextový vyhledávač je vyhedávač, který vykonává fulltextové vyhledávání.","timestamp":"2017-11-10T21:59:00Z","_type":"cs"}} +{"id":"da_1","fields":{"title_da":"Søgemaskine","text_da":"En søgemaskine er en applikation til at hjælpe en bruger med at finde information. Det kan f.eks. være at finde filer med bestemte data (f.eks. ord), gemt i en computers hukommelse, for eksempel via World Wide Web (kaldes så en websøgemaskine). Ofte bruges søgemaskine fejlagtigt om linkkataloger eller Netguider.","timestamp":"2017-09-04T01:54:00Z","_type":"da"}} +{"id":"de_1","fields":{"title_de":"Suchmaschine","text_de":"Eine Suchmaschine ist ein Programm zur Recherche von Dokumenten, die in einem Computer oder einem Computernetzwerk wie z. B. dem World Wide Web gespeichert sind. Internet-Suchmaschinen haben ihren Ursprung in Information-Retrieval-Systemen. Sie erstellen einen Schlüsselwort-Index für die Dokumentbasis, um Suchanfragen über Schlüsselwörter mit einer nach Relevanz geordneten Trefferliste zu beantworten. Nach Eingabe eines Suchbegriffs liefert eine Suchmaschine eine Liste von Verweisen auf möglicherweise relevante Dokumente, meistens dargestellt mit Titel und einem kurzen Auszug des jeweiligen Dokuments. Dabei können verschiedene Suchverfahren Anwendung finden.","timestamp":"2017-09-04T01:54:00Z","_type":"de"}} +{"id":"el_1","fields":{"title_el":"Μηχανή αναζήτησης","text_el":"Μια μηχανή αναζήτησης είναι μια εφαρμογή που επιτρέπει την αναζήτηση κειμένων και αρχείων στο Διαδίκτυο. 
Αποτελείται από ένα πρόγραμμα υπολογιστή που βρίσκεται σε έναν ή περισσότερους υπολογιστές στους οποίους δημιουργεί μια βάση δεδομένων με τις πληροφορίες που συλλέγει από το διαδίκτυο, και το διαδραστικό περιβάλλον που εμφανίζεται στον τελικό χρήστη ο οποίος χρησιμοποιεί την εφαρμογή από άλλον υπολογιστή συνδεδεμένο στο διαδίκτυο. Οι μηχανές αναζήτησης αποτελούνται από 3 είδη λογισμικού, το spider software, το index software και το query software.","timestamp":"2017-11-21T19:57:00Z","_type":"el"}} +{"id":"en_1","fields":{"title_en":"Search engine (computing)","text_en":"A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.","timestamp":"2018-07-04T05:41:00Z","_type":"en"}} +{"id":"es_1","fields":{"title_es":"Motor de búsqueda","text_es":"Un motor de búsqueda o buscador es un sistema informático que busca archivos almacenados en servidores web gracias a su spider (también llamado araña web). Un ejemplo son los buscadores de Internet (algunos buscan únicamente en la web, pero otros lo hacen además en noticias, servicios como Gopher, FTP, etc.) cuando se pide información sobre algún tema. Las búsquedas se hacen con palabras clave o con árboles jerárquicos por temas; el resultado de la búsqueda «Página de resultados del buscador» es un listado de direcciones web en los que se mencionan temas relacionados con las palabras clave buscadas. Como operan de forma automática, los motores de búsqueda contienen generalmente más información que los directorios. 
Sin embargo, estos últimos también han de construirse a partir de búsquedas (no automatizadas) o bien a partir de avisos dados por los creadores de páginas.","timestamp":"2018-08-30T11:30:00Z","_type":"es"}} +{"id":"fa_1","fields":{"title_fa":"موتور جستجو (پردازش)","text_fa":"موتور جستجو یا جویشگر، در فرهنگ رایانه، به طور عمومی به برنامه‌ای گفته می‌شود که کلمات کلیدی را در یک سند یا بانک اطلاعاتی جستجو می‌کند. در اینترنت به برنامه‌ای گفته می‌شود که کلمات کلیدی موجود در فایل‌ها و سندهای وب جهانی، گروه‌های خبری، منوهای گوفر و آرشیوهای FTP را جستجو می‌کند. جویشگرهای زیادی وجود دارند که امروزه از معروفترین و پراستفاده‌ترین آنها می‌توان به google و یاهو! جستجو اشاره کرد.","timestamp":"2017-01-06T02:46:00Z","_type":"fa"}} +{"id":"fi_1","fields":{"title_fi":"Hakukone","text_fi":"Hakukone on web-pohjainen ohjelma, joka etsii jatkuvasti Internetistä (varsinkin Webistä) uusia sivuja eritellen ja liittäen ne hakemistoonsa erityisten hakusanojen mukaan. Näitä hyväksi käyttäen hakukone tulostaa käyttäjän syöttämiä hakusanoja lähimpänä olevat sivut. Analysointi tapahtuu käytännössä eri hakukoneissa erilaisilla menetelmillä.","timestamp":"2017-10-04T14:33:00Z","_type":"fi"}} +{"id":"fr_1","fields":{"title_fr":"Moteur de recherche","text_fr":"Un moteur de recherche est une application web permettant de trouver des ressources à partir d'une requête sous forme de mots. Les ressources peuvent être des pages web, des articles de forums Usenet, des images, des vidéos, des fichiers, etc. Certains sites web offrent un moteur de recherche comme principale fonctionnalité ; on appelle alors « moteur de recherche » le site lui-même. Ce sont des instruments de recherche sur le web sans intervention humaine, ce qui les distingue des annuaires. Ils sont basés sur des « robots », encore appelés « bots », « spiders «, « crawlers » ou « agents », qui parcourent les sites à intervalles réguliers et de façon automatique pour découvrir de nouvelles adresses (URL). 
Ils suivent les liens hypertextes qui relient les pages les unes aux autres, les uns après les autres. Chaque page identifiée est alors indexée dans une base de données, accessible ensuite par les internautes à partir de mots-clés. C'est par abus de langage qu'on appelle également « moteurs de recherche » des sites web proposant des annuaires de sites web : dans ce cas, ce sont des instruments de recherche élaborés par des personnes qui répertorient et classifient des sites web jugés dignes d'intérêt, et non des robots d'indexation. Les moteurs de recherche ne s'appliquent pas qu'à Internet : certains moteurs sont des logiciels installés sur un ordinateur personnel. Ce sont des moteurs dits « de bureau » qui combinent la recherche parmi les fichiers stockés sur le PC et la recherche parmi les sites Web — on peut citer par exemple Exalead Desktop, Google Desktop et Copernic Desktop Search, Windex Server, etc. On trouve également des métamoteurs, c'est-à-dire des sites web où une même recherche est lancée simultanément sur plusieurs moteurs de recherche, les résultats étant ensuite fusionnés pour être présentés à l'internaute. On peut citer dans cette catégorie Ixquick, Mamma, Kartoo, Framabee ou Lilo.","timestamp":"2018-05-30T15:15:00Z","_type":"fr"}} +{"id":"ga_1","fields":{"title_ga":"Inneall cuardaigh","text_ga":"Acmhainn ar an ngréasán domhanda atá insroichte le brabhsálaí Gréasáin, a chabhraíonn leis an úsáideoir ionaid is eolas a aimsiú. Bíonn na hinnill cuardaigh (Yahoo, Lycos, Google, Ask Jeeves) ag cuardach tríd an ngréasán an t-am ar fad, ag tógáil innéacsanna ábhar éagsúla — mar shampla, ag aimsiú teidil, fotheidil, eochairfhocail is céadlínte cáipéisí. Uaidh sin, is féidir cuid mhaith cáipéisí éagsúla ar ábhar ar leith a aisghabháil. Déanann an cuardach leanúnach cinnte de go bhfuil na hinnéacsanna suas chun dáta. 
Mar sin féin, aisghabhann na hinnill an-chuid cháipéisí nach mbaineann le hábhar, agus tá an-iarracht ar siúl an t-am ar fad iad a fheabhsú.","timestamp":"2013-10-27T18:17:00Z","_type":"ga"}} +{"id":"gl_1","fields":{"title_gl":"Motor de busca","text_gl":"Un motor de busca ou buscador é un sistema informático que procura arquivos almacenados en servidores web, un exemplo son os buscadores de internet (algúns buscan só na Web pero outros buscan ademais en News, Gopher, FTP etc.) cando lles pedimos información sobre algún tema. As procuras fanse con palabras clave ou con árbores xerárquicas por temas; o resultado da procura é unha listaxe de direccións Web nas que se mencionan temas relacionados coas palabras clave buscadas.","timestamp":"2016-10-31T13:33:00Z","_type":"gl"}} +{"id":"gu_1","fields":{"title_gu":"વેબ શોધ એન્જીન","text_gu":"વેબ શોધ એન્જિન એ વર્લ્ડ વાઈડ વેબ (World Wide Web) પર વિવિધ માહિતી શોધવા માટે ઉપયોગમાં લેવામાં આવે છે. શોધ લીસ્ટને સામાન્ય રીતે યાદીમાં દર્શાવવામાં આવે છે અને જેને સામાન્ય રીતે હીટ્સ કહેવામાં આવે છે. જે માહિતી મળે છે તેમાં વેબ પૃષ્ઠ (web page), છબીઓ, માહિતી અને અન્ય પ્રકારની ફાઈલો હોય છે. કેટલાક શોધ એન્જિનો ન્યુઝબુક, ડેટાબેઝ અને અન્ય પ્રકારની ઓપન ડીરેક્ટરી (open directories)ઓની વિગતો પણ આપે છે. 
વ્યકિતઓ દ્વારા દુરસ્ત થતી વેબ ડાયરેક્ટરીઝ (Web directories)થી અલગ રીતે, શોધ એન્જિન ઍલ્ગરિધમનો અથવા ઍલ્ગરિધમ (algorithmic) અને માનવીય બાબતોના મિક્ષણનો ઉપયોગ કરે છે.","timestamp":"2013-04-04T19:28:00Z","_type":"gu"}} +{"id":"hi_1","fields":{"title_hi":"खोज इंजन","text_hi":"ऐसे कम्प्यूटर प्रोग्राम खोजी इंजन (search engine) कहलाते हैं जो किसी कम्प्यूटर सिस्टम पर भण्डारित सूचना में से वांछित सूचना को ढूढ निकालते हैं। ये इंजन प्राप्त परिणामों को प्रायः एक सूची के रूप में प्रस्तुत करते हैं जिससे वांछित सूचना की प्रकृति और उसकी स्थिति का पता चलता है। खोजी इंजन किसी सूचना तक अपेक्षाकृत बहुत कम समय में पहुँचने में हमारी सहायता करते हैं। वे 'सूचना ओवरलोड' से भी हमे बचाते हैं। खोजी इंजन का सबसे प्रचलित रूप 'वेब खोजी इंजन' है जो वर्ल्ड वाइड वेब पर सूचना खोजने के लिये प्रयुक्त होता है।","timestamp":"2017-10-19T20:09:00Z","_type":"hi"}} +{"id":"hu_1","fields":{"title_hu":"Keresőmotor","text_hu":"A keresőmotor az informatikában egy program vagy alkalmazás, amely bizonyos feltételeknek (többnyire egy szónak vagy kifejezésnek) megfelelő információkat keres valamilyen számítógépes környezetben. Ez a cikk a World Wide Weben (és esetleg az internet más részein, például a Useneten) kereső alkalmazásokról szól, a keresőmotor kifejezés önmagában általában ezekre vonatkozik. 
Másfajta keresőmotorokra példák a vállalati keresőmotorok, amik egy intraneten, és a személyi keresőmotorok, amik egy személyi számítógép állományai között keresnek.","timestamp":"2018-05-15T20:40:00Z","_type":"hu"}} +{"id":"hy_1","fields":{"title_hy":"Որոնողական համակարգ","text_hy":"Որոնողական համակարգը գործիք է, որը նախատեսված է համապատասխան բառերով Համաշխարհային ցանցում որոնումներ կատարելու համար։ Ստեղծված է համացանցում և FTP սերվերներում ինֆորմացիա փնտրելու համար։ Փնտրված արդյունքները ընդհանրապես ներկայացվում են արդյունքների ցանկում և սովորաբար կոչվում են նպատակակակետ, հիթ։ Ինֆորմացիան կարող է բաղկացած լինել վեբ էջերից, նկարներից, ինֆորմացիաներից և այլ տիպի ֆայլերից ու տվյալներից։ Այն կարող է օգտագործվել տարբեր տեսակի տեղեկատվություն որոնելու համար, ներառյալ՝ կայքեր, ֆորումներ, նկարներ, վիդեոներ, ֆայլեր և այլն։ Որոշ կայքեր արդեն իրենցից ներկայացնում են ինչ-որ որոնողական համակարգ, օրինակ՝ Dailymotion, YouTube և Google Videos ինտերնետում տեղադրված տեսահոլովակների որոնողական կայքեր են։ Որոնողական կայքը բաղկացած է \"ռոբոտներից\", որոնց անվանում են նաև bot, spider, crawler, որոնք ավտոմատ կերպով, առանց մարդկային միջամտության պարբերաբար հետազոտում են կայքերը։ Որոնողական կայքերը հետևում են հղումներին, որոնք կապված լինելով իրար հետ ինդեքսավորում է յուրաքանչյուր էջ տվյալների բազայում՝ հետագայում բանալի բառերի օգնությամբ դառնալով հասանելի ինտերնետից օգտվողների համար։ Սխալմամբ, որոնողական կայքեր են անվանում նաև այն կայքերը, որոնք իրենցից ներկայացնում են կայքային տեղեկատուներ։ Այս կայքերում ուշադրության արժանի կայքերը ցուցակագրվում և դասակարգվում են մարդկային ռեսուրսների շնորհիվ, այլ ոչ թե բոտերի կամ ռոբետների միջոցով։ Այդ կայքերից կարելի է նշել օրինակ՝ Yahoo!։ Yahoo!-ի որոնողական կայքը գտնվում է այստեղ։ Բոլոր որոնողական համակարգերը նախատեսված են ինտերնետում որոնում իրականացնելու համար, սակայն կան որոշ որոնողական համակարգերի տարատեսակներ, որոնք համակարգչային ծրագրեր են և հետևաբար տեղակայվում են համակարգչի մեջ։ Այս համակարգերը կոչվում են desktop։ Վերջիներս հնարավորություն են 
տալիս որոնելու թե համակարգչի մեջ կուտակված ֆայլեը, թե կայքերում տեղադրված ռեսուրսները։ Այդ ծրագրերից ամենահայտնիներն են՝ Exalead Desktop, Copernic Desktop Search Գոյություն ունեն նաև մետա-որոնողական համակարգեր, այսինքն կայքեր, որ նույն որոնումը կատարում են միաժամանակ տարբեր որոնողական կայքերի միջնորդությամբ։ Որոնման արդյունքները հետո դասակարգվում են որպեսզի ներկայացվեն օգտագործողին։ Մետա-որոնողական համակարգերի շարքից կարելի է թվարկել օրինակ՝ Mamma և Kartoo։","timestamp":"2017-11-20T17:47:00Z","_type":"hy"}} +{"id":"id_1","fields":{"title_id":"Mesin pencari web","text_id":"Mesin pencari web atau mesin telusur web (bahasa Inggris: web search engine) adalah program komputer yang dirancang untuk melakukan pencarian atas berkas-berkas yang tersimpan dalam layanan www, ftp, publikasi milis, ataupun news group dalam sebuah ataupun sejumlah komputer peladen dalam suatu jaringan. Mesin pencari merupakan perangkat penelusur informasi dari dokumen-dokumen yang tersedia. Hasil pencarian umumnya ditampilkan dalam bentuk daftar yang seringkali diurutkan menurut tingkat akurasi ataupun rasio pengunjung atas suatu berkas yang disebut sebagai hits. Informasi yang menjadi target pencarian bisa terdapat dalam berbagai macam jenis berkas seperti halaman situs web, gambar, ataupun jenis-jenis berkas lainnya. Beberapa mesin pencari juga diketahui melakukan pengumpulan informasi atas data yang tersimpan dalam suatu basis data ataupun direktori web. Sebagian besar mesin pencari dijalankan oleh perusahaan swasta yang menggunakan algoritme kepemilikan dan basis data tertutup, di antaranya yang paling populer adalah safari Google (MSN Search dan Yahoo!). 
Telah ada beberapa upaya menciptakan mesin pencari dengan sumber terbuka (open source), contohnya adalah Htdig, Nutch, Egothor dan OpenFTS.","timestamp":"2017-11-20T17:47:00Z","_type":"id"}} +{"id":"it_1","fields":{"title_it":"Motore di ricerca","text_it":"Nell'ambito delle tecnologie di Internet, un motore di ricerca (in inglese search engine) è un sistema automatico che, su richiesta, analizza un insieme di dati (spesso da esso stesso raccolti) e restituisce un indice dei contenuti disponibili[1] classificandoli in modo automatico in base a formule statistico-matematiche che ne indichino il grado di rilevanza data una determinata chiave di ricerca. Uno dei campi in cui i motori di ricerca trovano maggiore utilizzo è quello dell'information retrieval e nel web. I motori di ricerca più utilizzati nel 2017 sono stati: Google, Bing, Baidu, Qwant, Yandex, Ecosia, DuckDuckGo.","timestamp":"2018-07-16T12:20:00Z","_type":"it"}} +{"id":"ja_1","fields":{"title_ja":"検索エンジン","text_ja":"検索エンジン(けんさくエンジン、英語: search engine)は、狭義にはインターネットに存在する情報(ウェブページ、ウェブサイト、画像ファイル、ネットニュースなど)を検索する機能およびそのプログラム。インターネットの普及初期には、検索としての機能のみを提供していたウェブサイトそのものを検索エンジンと呼んだが、現在では様々なサービスが加わったポータルサイト化が進んだため、検索をサービスの一つとして提供するウェブサイトを単に検索サイトと呼ぶことはなくなっている。広義には、インターネットに限定せず情報を検索するシステム全般を含む。狭義の検索エンジンは、ロボット型検索エンジン、ディレクトリ型検索エンジン、メタ検索エンジンなどに分類される。広義の検索エンジンとしては、ある特定のウェブサイト内に登録されているテキスト情報の全文検索機能を備えたソフトウェア(全文検索システム)等がある。検索エンジンは、検索窓と呼ばれるボックスにキーワードを入力して検索をかけるもので、全文検索が可能なものと不可能なものとがある。検索サイトを一般に「検索エンジン」と呼ぶことはあるが、厳密には検索サイト自体は検索エンジンでない。","timestamp":"2018-05-30T00:52:00Z","_type":"ja"}} +{"id":"kn_1","fields":{"title_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ","text_kn":"ಅಂತರ್ಜಾಲ ಹುಡುಕಾಟ ಯಂತ್ರ ಎಂದರೆ World Wide Webನಲ್ಲಿ ಮಾಹಿತಿ ಹುಡುಕುವುದಕ್ಕಾಗಿ ವಿನ್ಯಾಸಗೊಳಿಸಲಾದ ಒಂದು ಸಾಧನ. ಹುಡುಕಾಟದ ಫಲಿತಾಂಶಗಳನ್ನು ಸಾಮಾನ್ಯವಾಗಿ ಒಂದು ಪಟ್ಟಿಯ ರೂಪದಲ್ಲಿ ಪ್ರಸ್ತುತಪಡಿಸಲಾಗುತ್ತದೆ ಮತ್ತು ಇವನ್ನು ’ಹಿಟ್ಸ್’ ಎಂದು ಕರೆಯಲಾಗುತ್ತದೆ. ಈ ಮಾಹಿತಿಯು ಅನೇಕ ಜಾಲ ಪುಟಗಳು, ಚಿತ್ರಗಳು, ಮಾಹಿತಿ ಹಾಗೂ ಇತರೆ ಕಡತಗಳನ್ನು ಹೊಂದಿರಬಹುದು. 
ಕೆಲವು ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಬೇರೆ ದತ್ತಸಂಚಯಗಳು ಅಥವಾ ಮುಕ್ತ ಮಾಹಿತಿ ಸೂಚಿಗಳಿಂದ ದತ್ತಾಂಶಗಳ ಗಣಿಗಾರಿಕೆ ಮಾಡಿ ಹೊರತೆಗೆಯುತ್ತವೆ. ಜಾಲ ಮಾಹಿತಿಸೂಚಿಗಳನ್ನು ಸಂಬಂಧಿಸಿದ ಸಂಪಾದಕರು ನಿರ್ವಹಿಸಿದರೆ, ಹುಡುಕಾಟ ಯಂತ್ರಗಳು ಗಣನಪದ್ಧತಿಯ ಮೂಲಕ ಅಥವಾ ಗಣನಪದ್ಧತಿ ಮತ್ತು ಮಾನವ ಹೂಡುವಳಿಯ ಮಿಶ್ರಣದ ಮುಖಾಂತರ ಕಾರ್ಯನಿರ್ವಹಿಸುತ್ತವೆ.","timestamp":"2017-10-03T14:13:00Z","_type":"kn"}} +{"id":"ko_1","fields":{"title_cjk":"검색 엔진","text_cjk":"검색 엔진은 컴퓨터 시스템에 저장된 정보를 찾아주는 것을 도와주도록 설계된 정보 검색 시스템이다. 이러한 검색 결과는 목록으로 표현되는 것이 보통이다. 검색 엔진을 사용하면 정보를 찾는데 필요한 시간을 최소화할 수 있다. 가장 눈에 띄는 형태의 공용 검색 엔진으로는 웹 검색 엔진이 있으며 월드 와이드 웹에서 정보를 찾아준다.","timestamp":"2017-11-19T12:50:00Z","_type":"ko"}} +{"id":"ml_1","fields":{"title_ml":"വെബ് സെർച്ച് എഞ്ചിൻ","text_ml":"വേൾഡ് വൈഡ് വെബ്ബിലുള്ള വിവരങ്ങൾ തിരയാനുള്ള ഒരു ഉപാധിയാണ്‌ വെബ് സെർച്ച് എഞ്ചിൻ അഥവാ സെർച്ച് എഞ്ചിൻ. തിരച്ചിൽ ഫലങ്ങൾ സാധാരണായായി ഒരു പട്ടികയായി നൽകുന്നു, തിരച്ചിൽ ഫലങ്ങളെ ഹിറ്റുകൾ എന്നാണ്‌ വിളിച്ചുവരുന്നത്[അവലംബം ആവശ്യമാണ്]. തിരച്ചിൽ ഫലങ്ങളിൽ വെബ് പേജുകൾ, ചിത്രങ്ങൾ, വിവരങ്ങൾ, വെബ്ബിലുള്ള മറ്റ് ഫയൽ തരങ്ങൾ എന്നിവ ഉൾപ്പെടാം. അൽഗോരിതങ്ങൾ ഉപയോഗിച്ചാണ് സെർച്ച് എഞ്ചിനുകൾ പ്രവർത്തിക്കുന്നത്.","timestamp":"2010-05-05T15:06:00Z","_type":"ml"}} +{"id":"nl_1","fields":{"title_nl":"Zoekmachine","text_nl":"Een zoekmachine is een computerprogramma waarmee informatie kan worden gezocht in een bepaalde collectie; dit kan een bibliotheek, het internet, of een persoonlijke verzameling zijn. Zonder nadere aanduiding wordt meestal een webdienst bedoeld waarmee met behulp van vrije trefwoorden volledige tekst (full text) kan worden gezocht in het gehele wereldwijde web. In tegenstelling tot startpagina's of webgidsen is er geen of zeer weinig menselijke tussenkomst nodig; het bezoeken van de webpagina's en het sorteren van de rangschikkingen gebeurt met behulp van een algoritme. 
Google is wereldwijd de meest gebruikte zoekmachine, andere populaire zoekmachines zijn Yahoo!, Bing en Baidu.","timestamp":"2018-05-07T11:05:00Z","_type":"nl"}} +{"id":"no_1","fields":{"title_no":"Søkemotor","text_no":"En søkemotor er en type programvare som leter frem informasjon fra Internett (nettsider eller andre nettressurser) eller begrenset til et datasystem, der informasjonen samsvarer med et gitt søk, og rangerer treffene etter hva den oppfatter som mest relevant. Typisk ligger søkemotoren tilgjengelig som et nettsted, der brukeren legger inn søkeord ev. sammen med filterinnstillinger, og treffene vises gjerne som klikkbare lenker. Søkemotoren kan enten gjøre søk på hele Internett (for eksempel Google, Bing, Kvasir og Yahoo!), innenfor et bestemt nettsted (for eksempel søk innenfor VGs nettavis), eller innenfor et bestemt tema (f.eks. Kelkoo, som søker etter priser på produkter, og Picsearch, som søker etter bilder). En bedrift kan også sette opp en intern bedrifts-søkemotor for å få enklere tilgang til alle dokumenter og databaser i bedriften.","timestamp":"2018-02-05T14:15:00Z","_type":"no"}} +{"id":"ps_1","fields":{"title_ps":"انټرنټ لټوونکی ماشين","text_ps":"نټرنټ د معلوماتو يوه داسې پراخه نړۍ ده چې يوه پوله هم نه لري. هره ثانيه په زرگونو معلوماتي توکي په کې ورځای کېږي، خو بيا هم د ډکېدو کومه اندېښنه نه رامنځته کېږي. حيرانوونکې خبره بيا دا ده چې دغه ټول معلومات په داسې مهارت سره په دغه نړۍ کې ځای شوي دي، چې سړی يې د سترگو په رپ کې د نړۍ په هر گوټ کې ترلاسه کولای شي. د کيبورډ په يو دوو تڼيو زور کولو او د موږك په يو دوو کليکونو سره خپلو ټولو پوښتنو ته ځواب موندلای شئ. ټول معلومات په ځانگړو انټرنټ پاڼو کې خوندي وي، نو که سړي ته د يوې پاڼې پته معلومه وي نو سم له لاسه به دغه پاڼه د انټرنټ پاڼو په کتونکي پروگرام کې پرانيزي، خو که سړی بيا يو معلومات غواړي او د هغې پاڼې پته ورسره نه وي، چې دغه ځانگړي معلومات په كې ځای شوي دي، نو بيا سړی يوه داسې پياوړي ځواک ته اړتيا لري، چې د سترگو په رپ کې ټول انټرنټ چاڼ کړي او دغه ځانگړي معلومات راوباسي. 
له نېکه مرغه د دغه ځواک غم خوړل شوی دی او ډېرInternet Search Engine انټرنټ لټوونکي ماشينونه جوړ کړای شوي دي، چې په وړيا توگه ټول انټرنټ تر ثانيو هم په لږ وخت کې چاڼ کوي او زموږ د خوښې معلومات راښکاره کوي. دغو ماشينونو ته سړی يوه ځانگړې کليمه ورکوي او هغوی ټول انټرنټ په دغې وركړل شوې کلمې پسې لټوي او هر دقيق معلومات چې لاسته ورځي، نو د کمپيوټر پر پرده يې راښکاره کوي. د دغو ماشينونو په ډله کې يو پياوړی ماشين د Google په نوم دی. د نوموړي ماشين بنسټ په ١٩٩٨م کال کې د متحدو ايالاتو د Standford پوهنتون دوو محصلينو Larry Page او Sergey Brin کښېښود. د دغه ماشين خدمات سړی د www.google.com په انټرنټ پاڼه کې کارولای شي. نوموړی ماشين د نړۍ په گڼ شمېر ژبو باندې خدمات وړاندې کوي او داسې چټک او دقيق لټون کوي چې د انټرنټ نور ډېر غښتلي ماشينونه ورته گوته پر غاښ پاتې دي. گوگل په ټوله نړۍ کې کارول کېږي او تر نيمي ثانيي هم په لنډ وخت کې په ميليارډونو انټرنټ پاڼې چاڼ کوي او خپلو کاروونکو ته په پرتله ييزه توگه دقيق معلومات راباسي. گوگل په يوه ورځ کې څه كمُ ٢٠٠ ميليونه پوښتنې ځوابوي. دا ( گوگل) تورى خپله د يو امريکايي رياضيپوه د وراره له خوا په لومړي ځل د يوې لوبې لپاره کارول شوی و. هغه دغه تورى د يو سلو صفرونو ( 1000?.) غوندې لوی عدد ته د نوم په توگه کاراوه. دغه نوم د نوموړي شرکت د دغه توان ښكارندوى دى، چې په لنډ وخت کې په لويه کچه پوښتنو ته ځواب ورکوي او معلومات لټوي. سړی چې د گوگل چټکتيا او دقيقوالي ته ځير شي، نو دا پوښته راپورته کېږي چې د دې ماشين شا ته به څومره پرمختللي کمپيوټرونه او پياوړی تخنيک پټ وي. خو اصلاً د گوگل شا ته په يوه لوی جال کې د منځنۍ بيې کمپيوټرونه سره نښلول شوي دي . په دې توگه په زرگونو کمپيوټرونه هممهاله په کار بوخت وي، چې په ترڅ کې يې د معلوماتو لټول او چاڼ کول چټکتيا مومي. 
د يوې پوښتنې له اخيستلو څخه راواخله معلوماتو تر لټولو او بيا د دقيقوالي له مخې په يوه ځانگړي طرز بېرته کاروونکي يا پوښتونكي تر ښوولو پورې ټولې چارې د درېيو Software پروگرامونه په لاس کې دي، چې په دغه زرگونو کمپيوټرونو کې ځای پر ځای شوي دي.","timestamp":"2015-12-15T18:53:00Z","_type":"ps"}} +{"id":"pt_1","fields":{"title_pt":"Motor de busca","text_pt":"Motor de pesquisa (português europeu) ou ferramenta de busca (português brasileiro) ou buscador (em inglês: search engine) é um programa desenhado para procurar palavras-chave fornecidas pelo utilizador em documentos e bases de dados. No contexto da internet, um motor de pesquisa permite procurar palavras-chave em documentos alojados na world wide web, como aqueles que se encontram armazenados em websites. Os motores de busca surgiram logo após o aparecimento da Internet, com a intenção de prestar um serviço extremamente importante: a busca de qualquer informação na rede, apresentando os resultados de uma forma organizada, e também com a proposta de fazer isto de uma maneira rápida e eficiente. A partir deste preceito básico, diversas empresas se desenvolveram, chegando algumas a valer milhões de dólares. Entre as maiores empresas encontram-se o Google, o Yahoo, o Bing, o Lycos, o Cadê e, mais recentemente, a Amazon.com com o seu mecanismo de busca A9 porém inativo. Os buscadores se mostraram imprescindíveis para o fluxo de acesso e a conquista novos visitantes. 
Antes do advento da Web, havia sistemas para outros protocolos ou usos, como o Archie para sites FTP anônimos e o Veronica para o Gopher (protocolo de redes de computadores que foi desenhado para indexar repositórios de documentos na Internet, baseado-se em menus).","timestamp":"2017-11-09T14:38:00Z","_type":"pt"}} +{"id":"ro_1","fields":{"title_ro":"Motor de căutare","text_ro":"Un motor de căutare este un program apelabil căutător, care accesează Internetul în mod automat și frecvent și care stochează titlul, cuvinte cheie și, parțial, chiar conținutul paginilor web într-o bază de date. În momentul în care un utilizator apelează la un motor de căutare pentru a găsi o informație, o anumită frază sau un cuvânt, motorul de căutare se va uita în această bază de date și, în funcție de anumite criterii de prioritate, va crea și afișa o listă de rezultate (engleză: hit list ).","timestamp":"2018-06-12T08:59:00Z","_type":"ro"}} +{"id":"ru_1","fields":{"title_ru":"Поисковая машина","text_ru":"Поисковая машина (поиско́вый движо́к) — комплекс программ, предназначенный для поиска информации. Обычно является частью поисковой системы. Основными критериями качества работы поисковой машины являются релевантность (степень соответствия запроса и найденного, т.е. уместность результата), полнота индекса, учёт морфологии языка.","timestamp":"2017-03-22T01:16:00Z","_type":"ru"}} +{"id":"sv_1","fields":{"title_sv":"Söktjänst","text_sv":"En söktjänst är en webbplats som gör det möjligt att söka efter innehåll på Internet. Söktjänsterna använder sökmotorer, även kallade sökrobotar, för att upptäcka, hämta in och indexera webbsidor.","timestamp":"2018-08-16T22:13:00Z","_type":"sv"}} +{"id":"ta_1","fields":{"title_ta":"தேடுபொறி","text_ta":"தேடுபொறி அல்லது தேடற்பொறி என்பது ஒரு கணினி நிரலாகும். இது இணையத்தில் குவிந்து கிடக்கும் தகவல்களில் இருந்தோ கணினியில் இருக்கும் தகவல்களில் இருந்தோ நமக்குத் தேவையான தகவலைப்பெற உதவுகின்றது. 
பொதுவாகப் பாவனையாளர்கள் ஒரு விடயம் சம்பந்தமாகத் தேடுதலை ஒரு சொல்லை வைத்து தேடுவார்கள். தேடுபொறிகள் சுட்டிகளைப் பயன்படுத்தி விரைவான தேடலை மேற்கொள்ளும். தேடுபொறிகள் என்பது பொதுவாக இணையத் தேடுபொறிகளை அல்லது இணையத் தேடற்பொறிகளையே குறிக்கும். வேறுசில தேடுபொறிகள் உள்ளூர் வலையமைப்பை மாத்திரமே தேடும். இணைய தேடு பொறிகள் பல பில்லியன் பக்கங்களில் இருந்து நமக்குத் தேவையான மிகப் பொருத்தமான பக்கங்களைத் தேடித் தரும். வேறுசில தேடற்பொறிகள் செய்திக் குழுக்கள், தகவற்தளங்கள், திறந்த இணையத்தளங்களைப் பட்டியலிடும் DMOZ.org போன்ற இணையத் தளங்களைத் தேடும். மனிதர்களால் எழுதப்பட்ட இணையத் தளங்களைப் பட்டியலிடும் தளங்களைப் போன்றல்லாது தேடு பொறிகள் அல்காரிதங்களைப் பாவித்துத் தேடல்களை மேற்கொள்ளும். வேறு சில தேடற்பொறிகளோ தமது இடைமுகத்தை வழங்கினாலும் உண்மையில் வேறுசில தேடுபொறிகளே தேடலை மேற்கொள்ளும். ஆரம்ப காலத்தில் ASCII முறை வரியுருக்களை கொண்டே தேடு சொற்களை உள்ளிட முடிந்தது. தற்போது ஒருங்குறி எழுத்துக்குறிமுறையை பல தேடுபொறிகளும் ஆதரிப்பதால் ஆங்கிலத்தில் மட்டுமல்லாது உலக மொழிகள் அனைத்திலும் அவ்வம் மொழிப்பக்கங்களை தேடிப்பெறக்கூடியதாகவுள்ளது.","timestamp":"2017-12-24T10:30:00Z","_type":"ta"}} +{"id":"te_1","fields":{"title_te":"వెబ్ శోధనా యంత్రం","text_te":"వెబ్ శోధన యంత్రం అనేది వరల్డ్ వైడ్ వెబ్/ప్రపంచ వ్యాప్త వెబ్లో సమాచారాన్ని శోదించటానికి తయారుచేసిన ఒక సాధనం. శోధన ఫలితాలు సాధారణంగా ఒక జాబితాలో ఇవ్వబడతాయి మరియు అవి సాధారణంగా హిట్స్ అని పిలువబడతాయి. ఆ సమాచారం వెబ్ పేజీలు, చిత్రాలు, సమాచారం మరియు ఇతర రకాలైన జాబితాలను కలిగి ఉంటుంది.కొన్ని శోధనా యంత్రాలు డేటా బేస్ లు లేదా ఓపెన్ డైరెక్టరీలలో అందుబాటులో ఉన్న సమాచారాన్ని కూడా వెలికితీస్తాయి. 
మానవ సంపాదకులచే నిర్వహించబడే క్రమపరిచిన వెబ్ డైరెక్టరీల లా కాకుండా, శోధనా యంత్రాలు సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి ద్వారా లేదా సమస్య పరిష్కారానికి ఉపయోగించే ఒక క్రమ పద్దతి మరియు మానవ శక్తిల మిశ్రమంతో పనిచేస్తాయి.","timestamp":"2017-06-19T11:22:00Z","_type":"te"}} +{"id":"th_1","fields":{"title_th":"เสิร์ชเอนจิน","text_th":"เสิร์ชเอนจิน (search engine) หรือ โปรแกรมค้นหา คือ โปรแกรมที่ช่วยในการสืบค้นหาข้อมูล โดยเฉพาะข้อมูลบนอินเทอร์เน็ต โดยครอบคลุมทั้งข้อความ รูปภาพ ภาพเคลื่อนไหว เพลง ซอฟต์แวร์ แผนที่ ข้อมูลบุคคล กลุ่มข่าว และอื่น ๆ ซึ่งแตกต่างกันไปแล้วแต่โปรแกรมหรือผู้ให้บริการแต่ละราย. เสิร์ชเอนจินส่วนใหญ่จะค้นหาข้อมูลจากคำสำคัญ (คีย์เวิร์ด) ที่ผู้ใช้ป้อนเข้าไป จากนั้นก็จะแสดงรายการผลลัพธ์ที่มันคิดว่าผู้ใช้น่าจะต้องการขึ้นมา ในปัจจุบัน เสิร์ชเอนจินบางตัว เช่น กูเกิล จะบันทึกประวัติการค้นหาและการเลือกผลลัพธ์ของผู้ใช้ไว้ด้วย และจะนำประวัติที่บันทึกไว้นั้น มาช่วยกรองผลลัพธ์ในการค้นหาครั้งต่อ ๆ ไป","timestamp":"2016-06-18T11:06:00Z","_type":"th"}} +{"id":"tr_1","fields":{"title_tr":"Arama motoru","text_tr":"Arama motoru, İnternet üzerinde bulunan içeriği aramak için kullanılan bir mekanizmadır. Üç bileşenden oluşur: web robotu, arama indeksi ve kullanıcı arabirimi. 
Ancak arama sonuçları genellikle sık tıklanan internet sayfalarından oluşan bir liste olarak verilmektedir.","timestamp":"2018-03-13T17:37:00Z","_type":"tr"}} +{"id":"zh_1","fields":{"title_zh":"搜索引擎","text_zh":"搜索引擎(英语:search engine)是一种信息检索系统,旨在协助搜索存储在计算机系统中的信息。搜索结果一般被称为“hits”,通常会以表单的形式列出。网络搜索引擎是最常见、公开的一种搜索引擎,其功能为搜索万维网上储存的信息.","timestamp":"2018-08-27T05:47:00Z","_type":"zh"}} diff --git a/example/wiki_index_mapping.json b/examples/multiple_type_example_mapping.json similarity index 99% rename from example/wiki_index_mapping.json rename to examples/multiple_type_example_mapping.json index ac7c43b..36b6522 100644 --- a/example/wiki_index_mapping.json +++ b/examples/multiple_type_example_mapping.json @@ -1,6 +1,6 @@ { "types": { - "arwiki": { + "ar": { "enabled": true, "dynamic": true, "properties": { @@ -80,7 +80,7 @@ }, "default_analyzer": "ar" }, - "bgwiki": { + "bg": { "enabled": true, "dynamic": true, "properties": { @@ -160,7 +160,7 @@ }, "default_analyzer": "bg" }, - "cawiki": { + "ca": { "enabled": true, "dynamic": true, "properties": { @@ -240,7 +240,7 @@ }, "default_analyzer": "ca" }, - "cswiki": { + "cs": { "enabled": true, "dynamic": true, "properties": { @@ -320,7 +320,7 @@ }, "default_analyzer": "cs" }, - "dawiki": { + "da": { "enabled": true, "dynamic": true, "properties": { @@ -400,7 +400,7 @@ }, "default_analyzer": "da" }, - "dewiki": { + "de": { "enabled": true, "dynamic": true, "properties": { @@ -480,7 +480,7 @@ }, "default_analyzer": "de" }, - "elwiki": { + "el": { "enabled": true, "dynamic": true, "properties": { @@ -560,7 +560,7 @@ }, "default_analyzer": "el" }, - "enwiki": { + "en": { "enabled": true, "dynamic": true, "properties": { @@ -640,7 +640,7 @@ }, "default_analyzer": "en" }, - "eswiki": { + "es": { "enabled": true, "dynamic": true, "properties": { @@ -720,7 +720,7 @@ }, "default_analyzer": "es" }, - "fawiki": { + "fa": { "enabled": true, "dynamic": true, "properties": { @@ -800,7 +800,7 @@ }, "default_analyzer": "fa" }, - "fiwiki": { 
+ "fi": { "enabled": true, "dynamic": true, "properties": { @@ -880,7 +880,7 @@ }, "default_analyzer": "fi" }, - "frwiki": { + "fr": { "enabled": true, "dynamic": true, "properties": { @@ -960,7 +960,7 @@ }, "default_analyzer": "fr" }, - "gawiki": { + "ga": { "enabled": true, "dynamic": true, "properties": { @@ -1040,7 +1040,7 @@ }, "default_analyzer": "ga" }, - "glwiki": { + "gl": { "enabled": true, "dynamic": true, "properties": { @@ -1120,7 +1120,7 @@ }, "default_analyzer": "gl" }, - "guwiki": { + "gu": { "enabled": true, "dynamic": true, "properties": { @@ -1200,7 +1200,7 @@ }, "default_analyzer": "in" }, - "hiwiki": { + "hi": { "enabled": true, "dynamic": true, "properties": { @@ -1280,7 +1280,7 @@ }, "default_analyzer": "hi" }, - "huwiki": { + "hu": { "enabled": true, "dynamic": true, "properties": { @@ -1360,7 +1360,7 @@ }, "default_analyzer": "hu" }, - "hywiki": { + "hy": { "enabled": true, "dynamic": true, "properties": { @@ -1440,7 +1440,7 @@ }, "default_analyzer": "hy" }, - "idwiki": { + "id": { "enabled": true, "dynamic": true, "properties": { @@ -1520,7 +1520,7 @@ }, "default_analyzer": "id" }, - "itwiki": { + "it": { "enabled": true, "dynamic": true, "properties": { @@ -1600,7 +1600,7 @@ }, "default_analyzer": "it" }, - "jawiki": { + "ja": { "enabled": true, "dynamic": true, "properties": { @@ -1680,7 +1680,7 @@ }, "default_analyzer": "ja" }, - "knwiki": { + "kn": { "enabled": true, "dynamic": true, "properties": { @@ -1760,7 +1760,7 @@ }, "default_analyzer": "in" }, - "kowiki": { + "ko": { "enabled": true, "dynamic": true, "properties": { @@ -1840,7 +1840,7 @@ }, "default_analyzer": "cjk" }, - "mlwiki": { + "ml": { "enabled": true, "dynamic": true, "properties": { @@ -1920,7 +1920,7 @@ }, "default_analyzer": "in" }, - "nlwiki": { + "nl": { "enabled": true, "dynamic": true, "properties": { @@ -2000,7 +2000,7 @@ }, "default_analyzer": "nl" }, - "nowiki": { + "no": { "enabled": true, "dynamic": true, "properties": { @@ -2080,7 +2080,7 @@ }, 
"default_analyzer": "no" }, - "pswiki": { + "ps": { "enabled": true, "dynamic": true, "properties": { @@ -2160,7 +2160,7 @@ }, "default_analyzer": "ckb" }, - "ptwiki": { + "pt": { "enabled": true, "dynamic": true, "properties": { @@ -2240,7 +2240,7 @@ }, "default_analyzer": "pt" }, - "rowiki": { + "ro": { "enabled": true, "dynamic": true, "properties": { @@ -2320,7 +2320,7 @@ }, "default_analyzer": "ro" }, - "ruwiki": { + "ru": { "enabled": true, "dynamic": true, "properties": { @@ -2400,7 +2400,7 @@ }, "default_analyzer": "ru" }, - "svwiki": { + "sv": { "enabled": true, "dynamic": true, "properties": { @@ -2480,7 +2480,7 @@ }, "default_analyzer": "sv" }, - "tawiki": { + "ta": { "enabled": true, "dynamic": true, "properties": { @@ -2560,7 +2560,7 @@ }, "default_analyzer": "in" }, - "tewiki": { + "te": { "enabled": true, "dynamic": true, "properties": { @@ -2640,7 +2640,7 @@ }, "default_analyzer": "in" }, - "thwiki": { + "th": { "enabled": true, "dynamic": true, "properties": { @@ -2720,7 +2720,7 @@ }, "default_analyzer": "th" }, - "trwiki": { + "tr": { "enabled": true, "dynamic": true, "properties": { @@ -2800,7 +2800,7 @@ }, "default_analyzer": "tr" }, - "zhwiki": { + "zh": { "enabled": true, "dynamic": true, "properties": { diff --git a/go.mod b/go.mod index c50a8ae..a218f2c 100644 --- a/go.mod +++ b/go.mod @@ -1,49 +1,51 @@ module github.com/mosuka/blast -go 1.13 +go 1.14 require ( - github.com/RoaringBitmap/roaring v0.4.21 // indirect - github.com/blevesearch/bleve v0.8.1 + github.com/RoaringBitmap/roaring v0.4.17 // indirect + github.com/bbva/raft-badger v1.0.0 + github.com/blevesearch/bleve v0.8.0 github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 // indirect - github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 // indirect - github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect - github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 // indirect + github.com/blevesearch/cld2 
v0.0.0-20200327141045-8b5f551d37f5 // indirect + github.com/blevesearch/go-porterstemmer v1.0.2 // indirect + github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect + github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd // indirect + github.com/couchbase/ghistogram v0.1.0 // indirect github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 // indirect - github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect + github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect + github.com/dgraph-io/badger/v2 v2.0.0 + github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/etcd-io/bbolt v1.3.3 // indirect github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/gogo/protobuf v1.3.0 - github.com/golang/protobuf v1.3.2 - github.com/google/go-cmp v0.3.1 - github.com/gorilla/mux v1.7.3 - github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 + github.com/golang/protobuf v1.3.5 + github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.11.1 - github.com/hashicorp/raft v1.1.1 - github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/ikawaha/kagome.ipadic v1.1.0 // indirect - github.com/imdario/mergo v0.3.7 + github.com/grpc-ecosystem/grpc-gateway v1.14.3 + github.com/hashicorp/raft v1.1.2 + github.com/ikawaha/kagome.ipadic v1.1.2 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 - github.com/mosuka/bbadger v0.1.0 + 
github.com/mash/go-accesslog v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/common v0.9.1 github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/stretchr/objx v0.2.0 + github.com/spf13/cobra v0.0.7 + github.com/spf13/viper v1.4.0 + github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect - github.com/tebeka/snowball v0.3.0 // indirect + github.com/tebeka/snowball v0.4.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 // indirect - github.com/urfave/cli v1.22.1 go.etcd.io/bbolt v1.3.3 // indirect - go.uber.org/zap v1.10.0 - google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 - google.golang.org/grpc v1.23.1 + go.uber.org/zap v1.14.1 + google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c + google.golang.org/grpc v1.28.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/yaml.v2 v2.2.2 ) diff --git a/go.sum b/go.sum index 0379a4e..974344e 100644 --- a/go.sum +++ b/go.sum @@ -1,73 +1,95 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.4.1 
h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/RoaringBitmap/roaring v0.4.17 h1:oCYFIFEMSQZrLHpywH7919esI1VSrQZ0pJXkZPGIJ78= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= -github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= -github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/Smerity/govarint v0.0.0-20150407073650-7265e41f48f1/go.mod h1:o80NPAib/LOl8Eysqppjj7kkGkqz++eqzYGlvROpDcQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/bbva/raft-badger v1.0.0 h1:N8C2rELUxfrVZhtyCBja/ymhv8cvPhVB+3ab2ob9mkk= +github.com/bbva/raft-badger v1.0.0/go.mod h1:yQjfHBXGV55aXOoEAuNGNlIIGvGNbSG85gOLhfo0pDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blevesearch/bleve v0.7.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= -github.com/blevesearch/bleve v0.8.1 h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU= -github.com/blevesearch/bleve v0.8.1/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= +github.com/blevesearch/bleve v0.8.0 h1:DCoCrxscCXrlzVWK92k7Vq4d28lTAFuigVmcgIX0VCo= +github.com/blevesearch/bleve v0.8.0/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ= github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9 h1:ZPImXwzC+ICkkSYlPP9mMVgQlZH24+56rIEUjVxfFnY= -github.com/blevesearch/cld2 v0.0.0-20150916130542-10f17c049ec9/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= +github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5 h1:/4ikScMMYMqsRFWJjCyzd3CNWB0lxvqDkqa5nEv6NMc= +github.com/blevesearch/cld2 v0.0.0-20200327141045-8b5f551d37f5/go.mod h1:PN0QNTLs9+j1bKy3d/GB/59wsNBFC4sWLWG3k69lWbc= github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I= github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA= github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg= github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8= 
-github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d h1:iPCfLXcTYDotqO1atEOQyoRDwlGaZVuMI4wSaKQlI2I= -github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= +github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd h1:YVyOs9yxpxqcB93Ul/UbdGTh26TrTafZrLdCqbJ4IXs= +github.com/blevesearch/snowballstem v0.0.0-20200325004757-48afb64082dd/go.mod h1:cdytUvf6FKWA9NpXJihYdZq8TN2AiQ5HOS0UZUz0C9g= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6 
h1:T7Qykid5GIoDEVTZL0NcbimcT2qmzjo5mNGhe8i0/5M= -github.com/couchbase/ghistogram v0.0.0-20170308220240-d910dd063dd6/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/couchbase/ghistogram v0.1.0 h1:b95QcQTCzjTUocDXp/uMgSNQi8oj1tGwnJ4bODWZnps= +github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498 h1:b8rnI4JWbakUNfpmYDxGobTY/jTuF5zHLw0ID75yzuM= github.com/couchbase/moss v0.0.0-20190322010551-a0cae174c498/go.mod h1:mGI1GcdgmlL3Imff7Z+OjkkQ8qSKr443BuZ+qFgWbPQ= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M= github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe/go.mod h1:prYTC8EgTu3gwbqJihkud9zRXISvyulAplQ6exdCo1g= -github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd h1:zeuJhcG3f8eePshH3KxkNE+Xtl53pVln9MOUPMyr/1w= -github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd/go.mod h1:xbc8Ff/oG7h2ejd7AlwOpfd+6QZntc92ygpAOfGwcKY= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= github.com/cznic/b 
v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible h1:xeEWHqaQFcm44dJsZYN6JIiLCHG+DciygDfGvIfbkv8= -github.com/dgraph-io/badger v2.0.0-rc.2.0.20190626232749-b116882676f2+incompatible/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgraph-io/badger/v2 v2.0.0 h1:Cr05o2TUd2IcLbEY0aGd8mbjm1YyQpy+dswo3BcDXrE= +github.com/dgraph-io/badger/v2 v2.0.0/go.mod h1:YoRSIp1LmAJ7zH7tZwRvjNMUYLxB4wl3ebYkaIruZ04= +github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e h1:aeUNgwup7PnDOBAD1BOKAqzb/W/NksOj6r3dwKKuqfg= +github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 
h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b h1:SeiGBzKrEtuDddnBABHkp4kq9sBGE9nuYmk6FPTg0zg= +github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= @@ -83,43 +105,48 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 
h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= 
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= 
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.11.1 h1:/dBYI+n4xIL+Y9SKXQrjlKTmJJDwCSlNLRwZ5nBhIek= -github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= +github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -132,24 +159,24 @@ github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCS github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/raft v1.1.0/go.mod 
h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= +github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 h1:bLsrEmB2NUwkHH18FOJBIa04wOV2RQalJrcafTYu6Lg= -github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477/go.mod h1:aUF6HQr8+t3FC/ZHAC+pZreUBhTaxumuu3L+d37uRxk= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ikawaha/kagome.ipadic v1.1.0 h1:9hzwhcklEL4Cmp+lM9HQfmDg2nhB43Fe1n9UUY6mifY= -github.com/ikawaha/kagome.ipadic v1.1.0/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/ikawaha/kagome.ipadic v1.1.2 h1:pFxZ1PpMpc6ZoBK712YN5cVK0u/ju2DZ+gRIOriJFFs= +github.com/ikawaha/kagome.ipadic v1.1.2/go.mod h1:DPSBbU0czaJhAb/5uKQZHMc9MTVRpDugJfX+HddPHHg= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -163,24 +190,26 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217 h1:oWyemD7bnPAGRGGPE22W1Z+kspkC7Uclz5rdzgxxiwk= -github.com/mash/go-accesslog v0.0.0-20180522074327-610c2be04217/go.mod h1:5JLTyA+23fYz/BfD5Hn736mGEZopzWtEx1pdNfnTp8k= +github.com/mash/go-accesslog v1.1.0 h1:y22583qP3s+SePBs6mv8ZTz5D1UffPrSg+WFEW2Rf/c= +github.com/mash/go-accesslog v1.1.0/go.mod h1:DAbGQzio0KX16krP/3uouoTPxGbzcPjFAb948zazOgg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 
h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mosuka/bbadger v0.1.0 h1:yc0UbkZFREZjzcNqXJp0/DPOTWld9Vq/S/MTHOb4x14= -github.com/mosuka/bbadger v0.1.0/go.mod h1:Er3F7xRxkBmVSIhqjA9CSk7ovFqfdcZDdzFBWJqfwog= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -189,6 +218,7 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -198,25 +228,38 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= @@ -229,68 +272,99 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 
h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s= github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify 
v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tebeka/snowball v0.3.0 h1:/vP76OjIhZrXtcmBmQgQ986B/WM95MB4tdLEuWdDgZk= -github.com/tebeka/snowball v0.3.0/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= +github.com/tebeka/snowball v0.4.1 h1:erVaJlHNQD465+S9dBGnl/AdDiGU0N8FTRo5QexNgCs= +github.com/tebeka/snowball v0.4.1/go.mod h1:4IfL14h1lvwZcp1sfXuuc7/7yCsvVffTWxWxCLfFpYg= github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222 h1:FLimlAjzuhq8loeLX7lLhKKeUgpA/4slynlNVB/Qaks= github.com/tecbot/gorocksdb v0.0.0-20190705090504-162552197222/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -298,35 +372,53 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 h1:sM3evRHxE/1RuMe1FYAL3j7C7fUfIjkbE+NiDAYUF8U= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 
h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 h1:Phvl0+G5t5k/EUFUi0wPdUUeTL2HydMQUXHnunWgSb0= -google.golang.org/genproto v0.0.0-20190916214212-f660b8655731/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 
h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= @@ -336,7 +428,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/hashutils/hashutils.go b/hashutils/hashutils.go deleted file mode 100644 index 2ac1911..0000000 --- a/hashutils/hashutils.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package hashutils - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" -) - -func Hash(v interface{}) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - hb := sha256.Sum256(b) - - return hex.EncodeToString(hb[:]), nil -} diff --git a/http/metric.go b/http/metric.go deleted file mode 100644 index 09afbf5..0000000 --- a/http/metric.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package http - -import ( - "net/http" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - namespace = "http" - subsystem = "server" - - DurationSeconds = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handling_seconds", - Help: "The invocation duration in seconds.", - }, - []string{ - "request_uri", - }, - ) - - RequestsTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "handled_total", - Help: "The number of requests.", - }, - []string{ - "request_uri", - "http_method", - "http_status", - }, - ) - - RequestsBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_received_bytes", - Help: "A summary of the invocation requests bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) - - ResponsesBytesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "responses_sent_bytes", - Help: "A summary of the invocation responses bytes.", - }, - []string{ - "request_uri", - "http_method", - }, - ) -) - -func init() { - prometheus.MustRegister(DurationSeconds) - prometheus.MustRegister(RequestsTotal) - prometheus.MustRegister(RequestsBytesTotal) - prometheus.MustRegister(ResponsesBytesTotal) -} - -func RecordMetrics(start time.Time, status int, writer http.ResponseWriter, request *http.Request) { - DurationSeconds.With(prometheus.Labels{"request_uri": request.RequestURI}).Observe(float64(time.Since(start)) / float64(time.Second)) - - RequestsTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method, "http_status": strconv.Itoa(status)}).Inc() - - RequestsBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(float64(request.ContentLength)) - - contentLength, err := 
strconv.ParseFloat(writer.Header().Get("Content-Length"), 64) - if err == nil { - ResponsesBytesTotal.With(prometheus.Labels{"request_uri": request.RequestURI, "http_method": request.Method}).Add(contentLength) - } -} diff --git a/http/response.go b/http/response.go deleted file mode 100644 index d51fdc2..0000000 --- a/http/response.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package http - -import ( - "encoding/json" - "net/http" - "strconv" - - "go.uber.org/zap" -) - -func NewJSONMessage(msgMap map[string]interface{}) ([]byte, error) { - content, err := json.MarshalIndent(msgMap, "", " ") - if err != nil { - return nil, err - } - - return content, nil -} - -func WriteResponse(w http.ResponseWriter, content []byte, status int, logger *zap.Logger) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", strconv.FormatInt(int64(len(content)), 10)) - w.WriteHeader(status) - _, err := w.Write(content) - if err != nil { - logger.Error(err.Error()) - } - - return -} diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go deleted file mode 100644 index 38ace62..0000000 --- a/indexer/grpc_client.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/index" - "google.golang.org/grpc" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client index.IndexClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) 
- if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: index.NewIndexClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *index.NodeHealthCheckRequest, opts ...grpc.CallOption) (*index.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) -} - -func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.NodeInfoResponse, error) { - return c.client.NodeInfo(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterJoin(req *index.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterJoin(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterLeave(req *index.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterLeave(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*index.ClusterInfoResponse, error) { - return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { - return c.client.ClusterWatch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Get(req *index.GetRequest, opts ...grpc.CallOption) (*index.GetResponse, error) { - return c.client.Get(c.ctx, req, opts...) -} - -func (c *GRPCClient) Index(req *index.IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Index(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *index.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Delete(c.ctx, req, opts...) 
-} - -func (c *GRPCClient) BulkIndex(req *index.BulkIndexRequest, opts ...grpc.CallOption) (*index.BulkIndexResponse, error) { - return c.client.BulkIndex(c.ctx, req, opts...) -} - -func (c *GRPCClient) BulkDelete(req *index.BulkDeleteRequest, opts ...grpc.CallOption) (*index.BulkDeleteResponse, error) { - return c.client.BulkDelete(c.ctx, req, opts...) -} - -func (c *GRPCClient) Search(req *index.SearchRequest, opts ...grpc.CallOption) (*index.SearchResponse, error) { - return c.client.Search(c.ctx, req, opts...) -} - -func (c *GRPCClient) GetIndexConfig(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexConfigResponse, error) { - return c.client.GetIndexConfig(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) GetIndexStats(req *empty.Empty, opts ...grpc.CallOption) (*index.GetIndexStatsResponse, error) { - return c.client.GetIndexStats(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Snapshot(c.ctx, &empty.Empty{}) -} diff --git a/indexer/grpc_gateway.go b/indexer/grpc_gateway.go deleted file mode 100644 index 3a1fafa..0000000 --- a/indexer/grpc_gateway.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/blevesearch/bleve" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *index.GetResponse: - value, err := protobuf.MarshalAny(v.(*index.GetResponse).Fields) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "fields": value, - }, - ) - case *index.SearchResponse: - value, err := protobuf.MarshalAny(v.(*index.SearchResponse).SearchResult) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "search_result": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". 
-func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - switch v.(type) { - case *index.SearchRequest: - m := map[string]interface{}{} - err := json.Unmarshal(data, &m) - if err != nil { - return err - } - searchRequestMap, ok := m["search_request"] - if !ok { - return errors.New("search_request does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - searchRequest := bleve.NewSearchRequest(nil) - err = json.Unmarshal(searchRequestBytes, searchRequest) - if err != nil { - return err - } - v.(*index.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) - default: - return json.Unmarshal(data, v) - } -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.IndexRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - id, ok := tmpValue["id"].(string) - if ok { - v.(*index.IndexRequest).Id = id - } - - fields, ok := tmpValue["fields"] - if !ok { - return errors.New("value does not exist") - } - v.(*index.IndexRequest).Fields = &any.Any{} - return protobuf.UnmarshalAny(fields, v.(*index.IndexRequest).Fields) - case *index.SearchRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - searchRequestMap, ok := tmpValue["search_request"] - if !ok { - return errors.New("value does not exist") - } - searchRequestBytes, err := json.Marshal(searchRequestMap) - if err != nil { - return err - } - var searchRequest *bleve.SearchRequest - err = json.Unmarshal(searchRequestBytes, &searchRequest) - if err != nil { - return err - } - 
v.(*index.SearchRequest).SearchRequest = &any.Any{} - return protobuf.UnmarshalAny(searchRequest, v.(*index.SearchRequest).SearchRequest) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type JsonlMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonlMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonlMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonlMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON-LINE stream from "r". 
-func (j *JsonlMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.BulkIndexRequest: - docs := make([]*index.Document, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - docBytes, err := reader.ReadBytes('\n') - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - break - } - } - - if len(docBytes) > 0 { - doc := &index.Document{} - err = index.UnmarshalDocument(docBytes, doc) - if err != nil { - return err - } - docs = append(docs, doc) - } - } - v.(*index.BulkIndexRequest).Documents = docs - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonlMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *JsonlMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type TextMarshaler struct{} - -// ContentType always Returns "application/json". -func (*TextMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *TextMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *TextMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads text stream from "r". 
-func (j *TextMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *index.BulkDeleteRequest: - ids := make([]string, 0) - reader := bufio.NewReader(bytes.NewReader(buffer)) - for { - //idBytes, err := reader.ReadBytes('\n') - idBytes, _, err := reader.ReadLine() - if err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - break - } - } - - if len(idBytes) > 0 { - ids = append(ids, string(idBytes)) - } - } - v.(*index.BulkDeleteRequest).Ids = ids - return nil - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *TextMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. -func (j *TextMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - runtime.WithMarshalerOption("application/x-ndjson", new(JsonlMarshaler)), - runtime.WithMarshalerOption("text/plain", new(TextMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := index.RegisterIndexHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != 
nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/indexer/grpc_server.go b/indexer/grpc_server.go deleted file mode 100644 index 8dd8c78..0000000 --- a/indexer/grpc_server.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service index.IndexServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service index.IndexServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - //grpc_recovery.UnaryServerInterceptor(), - )), - ) - - index.RegisterIndexServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := 
s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go deleted file mode 100644 index 63b8d78..0000000 --- a/indexer/grpc_service.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - managerGrpcAddress string - shardId string - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers *index.Cluster - peerClients map[string]*GRPCClient - cluster *index.Cluster - clusterChans map[chan index.ClusterWatchResponse]struct{} - clusterMutex sync.RWMutex - - 
managers *management.Cluster - managerClients map[string]*manager.GRPCClient - updateManagersStopCh chan struct{} - updateManagersDoneCh chan struct{} -} - -func NewGRPCService(managerGrpcAddress string, shardId string, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - managerGrpcAddress: managerGrpcAddress, - shardId: shardId, - raftServer: raftServer, - logger: logger, - - peers: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, - peerClients: make(map[string]*GRPCClient, 0), - cluster: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, - clusterChans: make(map[chan index.ClusterWatchResponse]struct{}), - - managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - managerClients: make(map[string]*manager.GRPCClient, 0), - }, nil -} - -func (s *GRPCService) Start() error { - if s.managerGrpcAddress != "" { - var err error - s.managers, err = s.getManagerCluster(s.managerGrpcAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - for id, node := range s.managers.Nodes { - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) - } - s.managerClients[node.Id] = client - } - - s.logger.Info("start to update manager cluster info") - go s.startUpdateManagers(500 * time.Millisecond) - } - - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - s.stopUpdateCluster() - - if s.managerGrpcAddress != "" { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - } - - return nil -} - -func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { - var client *manager.GRPCClient - - for id, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("assertion 
failed", zap.String("id", id)) - continue - } - - if node.State == management.Node_FOLLOWER || node.State == management.Node_LEADER { - var ok bool - client, ok = s.managerClients[id] - if ok { - return client, nil - } else { - s.logger.Error("node does not exist", zap.String("id", id)) - } - } else { - s.logger.Debug("node has not available", zap.String("id", id), zap.String("state", node.State.String())) - } - } - - err := errors.New("available client does not exist") - s.logger.Error(err.Error()) - - return nil, err -} - -func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(managerAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - return - }() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return res.Cluster, nil -} - -func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { - s.updateManagersStopCh = make(chan struct{}) - s.updateManagersDoneCh = make(chan struct{}) - - defer func() { - close(s.updateManagersDoneCh) - }() - - for { - select { - case <-s.updateManagersStopCh: - s.logger.Info("received a request to stop updating a manager cluster") - return - default: - // get client for manager from the list - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - - // create stream for watching cluster changes - req := &empty.Empty{} - stream, err := client.ClusterWatch(req) - if err != nil { - 
s.logger.Error(err.Error()) - continue - } - - s.logger.Info("wait for receive a manager cluster updates from stream") - resp, err := stream.Recv() - if err == io.EOF { - s.logger.Info(err.Error()) - continue - } - if err != nil { - s.logger.Error(err.Error()) - continue - } - s.logger.Info("cluster has changed", zap.Any("resp", resp)) - switch resp.Event { - case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: - // add to cluster nodes - s.managers.Nodes[resp.Node.Id] = resp.Node - - // check node state - switch resp.Node.State { - case management.Node_UNKNOWN, management.Node_SHUTDOWN: - // close client - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - } - default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER - if resp.Node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - - // check client that already exist in the client list - if client, exist := s.managerClients[resp.Node.Id]; !exist { - // create new client - s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } else { - if client.GetAddress() != resp.Node.Metadata.GrpcAddress { - // close client - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - 
err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) - } - delete(s.managerClients, resp.Node.Id) - - // re-create new client - s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) - continue - } - s.managerClients[resp.Node.Id] = newClient - } - } - } - case management.ClusterWatchResponse_LEAVE: - if client, exist := s.managerClients[resp.Node.Id]; exist { - s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.managerClients, resp.Node.Id) - } - - if _, exist := s.managers.Nodes[resp.Node.Id]; exist { - delete(s.managers.Nodes, resp.Node.Id) - } - default: - s.logger.Debug("unknown event", zap.Any("event", resp.Event)) - continue - } - } - } -} - -func (s *GRPCService) stopUpdateManagers() { - s.logger.Info("close all manager clients") - for id, client := range s.managerClients { - s.logger.Debug("close manager client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - } - - if s.updateManagersStopCh != nil { - s.logger.Info("send a request to stop updating a manager cluster") - close(s.updateManagersStopCh) - } - - s.logger.Info("wait for the manager cluster update to stop") - <-s.updateManagersDoneCh - s.logger.Info("the manager cluster update has been stopped") -} - -func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - for id, node := range s.cluster.Nodes { - switch node.State { - case index.Node_LEADER: - if 
client, exist := s.peerClients[id]; exist { - return client, nil - } - } - } - - err := errors.New("there is no leader") - s.logger.Error(err.Error()) - return nil, err -} - -func (s *GRPCService) cloneCluster(cluster *index.Cluster) (*index.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *index.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - savedCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - for { - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - s.cluster, err = s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - snapshotCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - for id, node := range snapshotCluster.Nodes { - if id != s.NodeID() { - s.peers.Nodes[id] = node - } - } - - // open clients for peer nodes - for id, node := range s.peers.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - client, exist := s.peerClients[id] - if exist { - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - delete(s.peerClients, id) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id)) - } - newClient, err := 
NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } else { - s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } - - // close clients for non-existent peer nodes - for id, client := range s.peerClients { - if _, exist := s.peers.Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.peerClients, id) - } - } - - // check joined and updated nodes - for id, node := range snapshotCluster.Nodes { - nodeSnapshot, exist := savedCluster.Nodes[id] - if exist { - // node exists in the cluster - n1, err := json.Marshal(node) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) - continue - } - n2, err := json.Marshal(nodeSnapshot) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) - continue - } - if !cmp.Equal(n1, n2) { - // node updated - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_UPDATE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } else { - // node joined - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_JOIN, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } 
- - // check left nodes - for id, node := range savedCluster.Nodes { - if _, exist := snapshotCluster.Nodes[id]; !exist { - // node left - // notify the cluster changes - clusterResp := &index.ClusterWatchResponse{ - Event: index.ClusterWatchResponse_LEAVE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - // set cluster state to manager - if !cmp.Equal(savedCluster, snapshotCluster) && s.managerGrpcAddress != "" && s.raftServer.IsLeader() { - snapshotClusterBytes, err := json.Marshal(snapshotCluster) - if err != nil { - s.logger.Error(err.Error()) - continue - } - var snapshotClusterMap map[string]interface{} - err = json.Unmarshal(snapshotClusterBytes, &snapshotClusterMap) - if err != nil { - s.logger.Error(err.Error()) - continue - } - - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - continue - } - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(snapshotClusterMap, valueAny) - if err != nil { - s.logger.Error(err.Error()) - continue - } - req := &management.SetRequest{ - Key: fmt.Sprintf("cluster/shards/%s", s.shardId), - Value: valueAny, - } - _, err = client.Set(req) - if err != nil { - s.logger.Error(err.Error()) - continue - } - } - - savedCluster = snapshotCluster - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeHealthCheck(ctx 
context.Context, req *index.NodeHealthCheckRequest) (*index.NodeHealthCheckResponse, error) { - resp := &index.NodeHealthCheckResponse{} - - switch req.Probe { - case index.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case index.NodeHealthCheckRequest_HEALTHINESS: - resp.State = index.NodeHealthCheckResponse_HEALTHY - case index.NodeHealthCheckRequest_LIVENESS: - resp.State = index.NodeHealthCheckResponse_ALIVE - case index.NodeHealthCheckRequest_READINESS: - resp.State = index.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() *index.Node { - node := s.raftServer.node - - switch s.raftServer.State() { - case raft.Follower: - node.State = index.Node_FOLLOWER - case raft.Candidate: - node.State = index.Node_CANDIDATE - case raft.Leader: - node.State = index.Node_LEADER - case raft.Shutdown: - node.State = index.Node_SHUTDOWN - default: - node.State = index.Node_UNKNOWN - } - - return node -} - -func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { - if _, exist := s.peerClients[id]; !exist { - err := errors.New("node does not exist in peers") - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - - req := &empty.Empty{} - resp, err := s.peerClients[id].NodeInfo(req) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return &index.Node{ - BindAddress: "", - State: index.Node_SHUTDOWN, - Metadata: &index.Metadata{ - GrpcAddress: "", - HttpAddress: "", - }, - }, nil - } - - return resp.Node, nil -} - -func (s *GRPCService) getNode(id string) (*index.Node, error) { - if id == "" || id == s.NodeID() { - return s.getSelfNode(), nil - } else { - return s.getPeerNode(id) - } -} - -func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) 
(*index.NodeInfoResponse, error) { - resp := &index.NodeInfoResponse{} - - node, err := s.getNode(s.NodeID()) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return &index.NodeInfoResponse{ - Node: node, - }, nil -} - -func (s *GRPCService) setNode(node *index.Node) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(node) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &index.ClusterJoinRequest{ - Node: node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterJoin(ctx context.Context, req *index.ClusterJoinRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.setNode(req.Node) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &index.ClusterLeaveRequest{ - Id: id, - } - - _, err = client.ClusterLeave(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterLeave(ctx context.Context, req *index.ClusterLeaveRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (*index.Cluster, error) { - cluster, err := 
s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update latest node state - for id := range cluster.Nodes { - node, err := s.getNode(id) - if err != nil { - s.logger.Debug(err.Error()) - continue - } - cluster.Nodes[id] = node - } - - return cluster, nil -} - -func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*index.ClusterInfoResponse, error) { - resp := &index.ClusterInfoResponse{} - - cluster, err := s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = cluster - - return resp, nil -} - -func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterWatchServer) error { - chans := make(chan index.ClusterWatchResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Get(ctx context.Context, req *index.GetRequest) (*index.GetResponse, error) { - resp := &index.GetResponse{} - - fields, err := s.raftServer.Get(req.Id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fields, fieldsAny) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", req.Id)) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Fields = fieldsAny - - return resp, nil -} - -func (s *GRPCService) Index(ctx 
context.Context, req *index.IndexRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - // index - var err error - if s.raftServer.IsLeader() { - err = s.raftServer.Index(&index.Document{Id: req.Id, Fields: req.Fields}) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Index(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *index.DeleteRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - // delete - var err error - if s.raftServer.IsLeader() { - err = s.raftServer.Delete(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Delete(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkIndex(ctx context.Context, req *index.BulkIndexRequest) (*index.BulkIndexResponse, error) { - resp := &index.BulkIndexResponse{} - - if s.raftServer.IsLeader() { - count, err := s.raftServer.BulkIndex(req.Documents) - if err != nil { - s.logger.Error(err.Error()) - resp.Count = -1 - return resp, status.Error(codes.Internal, err.Error()) - } - resp.Count = int32(count) - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.BulkIndex(req) - if err != 
nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) BulkDelete(ctx context.Context, req *index.BulkDeleteRequest) (*index.BulkDeleteResponse, error) { - resp := &index.BulkDeleteResponse{} - - if s.raftServer.IsLeader() { - count, err := s.raftServer.BulkDelete(req.Ids) - if err != nil { - s.logger.Error(err.Error()) - resp.Count = -1 - return resp, status.Error(codes.Internal, err.Error()) - } - resp.Count = int32(count) - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err := client.BulkDelete(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - return resp, nil -} - -func (s *GRPCService) Search(ctx context.Context, req *index.SearchRequest) (*index.SearchResponse, error) { - resp := &index.SearchResponse{} - - searchRequest, err := protobuf.MarshalAny(req.SearchRequest) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.InvalidArgument, err.Error()) - } - - searchResult, err := s.raftServer.Search(searchRequest.(*bleve.SearchRequest)) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - searchResultAny := &any.Any{} - err = protobuf.UnmarshalAny(searchResult, searchResultAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.SearchResult = searchResultAny - - return resp, nil -} - -func (s *GRPCService) GetIndexConfig(ctx context.Context, req *empty.Empty) (*index.GetIndexConfigResponse, error) { - resp := &index.GetIndexConfigResponse{ - IndexConfig: &index.IndexConfig{}, - } - - indexConfig, err := s.raftServer.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.Internal, err.Error()) - } - - if indexMapping, ok := indexConfig["index_mapping"]; ok { - indexMappingAny := &any.Any{} - err = protobuf.UnmarshalAny(indexMapping.(*mapping.IndexMappingImpl), indexMappingAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp.IndexConfig.IndexMapping = indexMappingAny - } - - if indexType, ok := indexConfig["index_type"]; ok { - resp.IndexConfig.IndexType = indexType.(string) - } - - if indexStorageType, ok := indexConfig["index_storage_type"]; ok { - resp.IndexConfig.IndexStorageType = indexStorageType.(string) - } - - return resp, nil -} - -func (s *GRPCService) GetIndexStats(ctx context.Context, req *empty.Empty) (*index.GetIndexStatsResponse, error) { - resp := &index.GetIndexStatsResponse{} - - indexStats, err := s.raftServer.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - indexStatsAny := &any.Any{} - err = protobuf.UnmarshalAny(indexStats, indexStatsAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.IndexStats = indexStatsAny - - return resp, nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/indexer/http_handler.go b/indexer/http_handler.go deleted file mode 100644 index 6a7353f..0000000 --- a/indexer/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/indexer/http_server.go b/indexer/http_server.go deleted file mode 100644 index 238da55..0000000 --- a/indexer/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/indexer/index.go b/indexer/index.go deleted file mode 100644 index 2c8a031..0000000 --- a/indexer/index.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "os" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/document" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type Index struct { - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - - index bleve.Index -} - -func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*Index, error) { - //bleve.SetLog(logger) - - var index bleve.Index - _, err := os.Stat(dir) - if os.IsNotExist(err) { - // create new index - index, err = bleve.NewUsing(dir, indexMapping, indexType, indexStorageType, nil) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } else { - // open existing index - index, err = bleve.OpenUsing(dir, map[string]interface{}{ - "create_if_missing": false, - "error_if_exists": false, - }) - if err != nil { - logger.Error(err.Error()) - return nil, err - } - } - - return &Index{ - index: index, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - }, nil -} - -func (i *Index) Close() error { - err := i.index.Close() - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) Get(id string) (map[string]interface{}, error) { - doc, err := 
i.index.Document(id) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - if doc == nil { - return nil, errors.ErrNotFound - } - - fields := make(map[string]interface{}, 0) - for _, f := range doc.Fields { - var v interface{} - switch field := f.(type) { - case *document.TextField: - v = string(field.Value()) - case *document.NumericField: - n, err := field.Number() - if err == nil { - v = n - } - case *document.DateTimeField: - d, err := field.DateTime() - if err == nil { - v = d.Format(time.RFC3339Nano) - } - } - existing, existed := fields[f.Name()] - if existed { - switch existing := existing.(type) { - case []interface{}: - fields[f.Name()] = append(existing, v) - case interface{}: - arr := make([]interface{}, 2) - arr[0] = existing - arr[1] = v - fields[f.Name()] = arr - } - } else { - fields[f.Name()] = v - } - } - - return fields, nil -} - -func (i *Index) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := i.index.Search(request) - if err != nil { - i.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (i *Index) Index(doc *index.Document) error { - _, err := i.BulkIndex([]*index.Document{doc}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return nil -} - -func (i *Index) BulkIndex(docs []*index.Document) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, doc := range docs { - fieldsIntr, err := protobuf.MarshalAny(doc.Fields) - if err != nil { - i.logger.Error(err.Error(), zap.Any("doc", doc)) - continue - } - err = batch.Index(doc.Id, *fieldsIntr.(*map[string]interface{})) - if err != nil { - i.logger.Error(err.Error()) - continue - } - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Delete(id string) error { - _, err := i.BulkDelete([]string{id}) - if err != nil { - i.logger.Error(err.Error()) - return err - } - - return 
nil -} - -func (i *Index) BulkDelete(ids []string) (int, error) { - batch := i.index.NewBatch() - - count := 0 - - for _, id := range ids { - batch.Delete(id) - count++ - } - - err := i.index.Batch(batch) - if err != nil { - i.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (i *Index) Config() (map[string]interface{}, error) { - return map[string]interface{}{ - "index_mapping": i.indexMapping, - "index_type": i.indexType, - "index_storage_type": i.indexStorageType, - }, nil -} - -func (i *Index) Stats() (map[string]interface{}, error) { - return i.index.StatsMap(), nil -} - -func (i *Index) SnapshotItems() <-chan *index.Document { - ch := make(chan *index.Document, 1024) - - go func() { - idx, _, err := i.index.Advanced() - if err != nil { - i.logger.Error(err.Error()) - return - } - - r, err := idx.Reader() - if err != nil { - i.logger.Error(err.Error()) - return - } - - docCount := 0 - - dr, err := r.DocIDReaderAll() - for { - if dr == nil { - i.logger.Error(err.Error()) - break - } - id, err := dr.Next() - if id == nil { - i.logger.Debug("finished to read all document ids") - break - } else if err != nil { - i.logger.Warn(err.Error()) - continue - } - - // get original document - fieldsBytes, err := i.index.GetInternal(id) - - // bytes -> map[string]interface{} - var fieldsMap map[string]interface{} - err = json.Unmarshal([]byte(fieldsBytes), &fieldsMap) - if err != nil { - i.logger.Error(err.Error()) - break - } - - // map[string]interface{} -> Any - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - i.logger.Error(err.Error()) - break - } - - doc := &index.Document{ - Id: string(id), - Fields: fieldsAny, - } - - ch <- doc - - docCount = docCount + 1 - } - - i.logger.Debug("finished to write all documents to channel") - ch <- nil - - i.logger.Info("finished to snapshot", zap.Int("count", docCount)) - - return - }() - - return ch -} diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go 
deleted file mode 100644 index da53222..0000000 --- a/indexer/raft_fsm.go +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - - cluster *index.Cluster - clusterMutex sync.RWMutex - - index *Index -} - -func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - f.logger.Info("initialize cluster") - f.cluster = &index.Cluster{Nodes: make(map[string]*index.Node, 0)} - - f.logger.Info("initialize index") - var err error - f.index, err = NewIndex(f.path, f.indexMapping, f.indexType, f.indexStorageType, f.logger) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) Stop() error { - 
f.logger.Info("close index") - err := f.index.Close() - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) GetNode(nodeId string) (*index.Node, error) { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - node, ok := f.cluster.Nodes[nodeId] - if !ok { - return nil, blasterrors.ErrNotFound - } - - return node, nil -} - -func (f *RaftFSM) SetNode(node *index.Node) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - f.cluster.Nodes[node.Id] = node - - return nil -} - -func (f *RaftFSM) DeleteNode(nodeId string) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - if _, ok := f.cluster.Nodes[nodeId]; !ok { - return blasterrors.ErrNotFound - } - - delete(f.cluster.Nodes, nodeId) - - return nil -} - -func (f *RaftFSM) GetDocument(id string) (map[string]interface{}, error) { - fields, err := f.index.Get(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - f.logger.Debug(err.Error(), zap.String("id", id)) - default: - f.logger.Error(err.Error(), zap.String("id", id)) - } - return nil, err - } - - return fields, nil -} - -func (f *RaftFSM) Index(doc *index.Document) error { - err := f.index.Index(doc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) BulkIndex(docs []*index.Document) (int, error) { - count, err := f.index.BulkIndex(docs) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) Delete(id string) error { - err := f.index.Delete(id) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSM) BulkDelete(ids []string) (int, error) { - count, err := f.index.BulkDelete(ids) - if err != nil { - f.logger.Error(err.Error()) - return -1, err - } - - return count, nil -} - -func (f *RaftFSM) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := f.index.Search(request) - if 
err != nil { - f.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (f *RaftFSM) GetIndexConfig() (map[string]interface{}, error) { - return f.index.Config() -} - -func (f *RaftFSM) GetIndexStats() (map[string]interface{}, error) { - return f.index.Stats() -} - -type fsmResponse struct { - error error -} - -type fsmBulkIndexResponse struct { - count int - error error -} - -type fsmBulkDeleteResponse struct { - count int - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - proposal := &index.Proposal{} - err := proto.Unmarshal(l.Data, proposal) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - switch proposal.Event { - case index.Proposal_SET_NODE: - err = f.SetNode(proposal.Node) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_DELETE_NODE: - err = f.DeleteNode(proposal.Node.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_INDEX: - err := f.Index(proposal.Document) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_DELETE: - err := f.Delete(proposal.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case index.Proposal_BULK_INDEX: - count, err := f.BulkIndex(proposal.Documents) - if err != nil { - f.logger.Error(err.Error()) - return &fsmBulkIndexResponse{count: count, error: err} - } - return &fsmBulkIndexResponse{count: count, error: nil} - case index.Proposal_BULK_DELETE: - count, err := f.BulkDelete(proposal.Ids) - if err != nil { - f.logger.Error(err.Error()) - return &fsmBulkDeleteResponse{count: count, error: err} - } - return &fsmBulkDeleteResponse{count: count, error: nil} - default: - err = errors.New("unsupported command") - 
f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - index: f.index, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - docCount := 0 - - buff := proto.NewBuffer(data) - for { - doc := &index.Document{} - err = buff.DecodeMessage(doc) - if err == io.ErrUnexpectedEOF { - break - } - if err != nil { - f.logger.Error(err.Error()) - continue - } - - err = f.index.Index(doc) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("restore", zap.Int("count", docCount)) - - return nil -} - -// --------------------- - -type RaftFSMSnapshot struct { - index *Index - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - ch := f.index.SnapshotItems() - - docCount := 0 - - for { - doc := <-ch - if doc == nil { - break - } - - docBytes, err := json.Marshal(doc) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - _, err = sink.Write(docBytes) - if err != nil { - f.logger.Error(err.Error()) - continue - } - - docCount = docCount + 1 - } - - f.logger.Info("persist", zap.Int("count", docCount)) - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/indexer/raft_server.go b/indexer/raft_server.go deleted file mode 100644 index 6c2c41c..0000000 --- a/indexer/raft_server.go +++ /dev/null @@ -1,688 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); 
-// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/proto" - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - _ "github.com/mosuka/blast/builtins" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - node *index.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - bootstrap bool - logger *zap.Logger - - transport *raft.NetworkTransport - raft *raft.Raft - fsm *RaftFSM -} - -func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.dataDir, "index") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.indexMapping, s.indexType, s.indexStorageType, s.logger) 
- if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("id", s.node.Id)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.node.Id) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - //if s.bootstrap { - // raftConfig.StartAsLeader = true - //} - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) - addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.dataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.raftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", 
s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - //case "badger": - // logStorePath := filepath.Join(s.dataDir, "raft", "log") - // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - // s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - default: - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - 
s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - Address: s.transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own node config", zap.Any("node", s.node)) - err = s.setNode(s.node) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = 
cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeAddress() string { - return string(s.transport.LocalAddr()) -} - -func (s *RaftServer) NodeID() string { - return s.node.Id -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { - nodeConfig, err := s.fsm.GetNode(nodeId) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", nodeId)) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNode(node *index.Node) error { - proposal := &index.Proposal{ - Event: index.Proposal_SET_NODE, - Node: node, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) deleteNode(nodeId string) error { - proposal := &index.Proposal{ - Event: index.Proposal_DELETE_NODE, - Node: &index.Node{ - Id: nodeId, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := 
s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (*index.Node, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - var node *index.Node - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - node, err = s.getNode(id) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(node *index.Node) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(node.Id) { - s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) - return nil - } - } - - if node.BindAddress == "" { - err = errors.New("missing bind address") - s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) - return err - } - - // add node to Raft cluster - s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) - f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) - return err - } - - // set node config - err = s.setNode(node) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return 
nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) - f := s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", string(server.ID))) - return err - } - } - } - - // delete node config - err = s.deleteNode(nodeId) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (*index.Cluster, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := &index.Cluster{Nodes: make(map[string]*index.Node, 0)} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) - continue - } - - cluster.Nodes[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) Get(id string) (map[string]interface{}, error) { - fields, err := s.fsm.GetDocument(id) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("id", id)) - default: - s.logger.Error(err.Error(), zap.String("id", id)) - } - return nil, err - } - - return fields, nil -} - -func (s *RaftServer) Index(doc 
*index.Document) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_INDEX, - Document: doc, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) Delete(id string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_DELETE, - Id: id, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) BulkIndex(docs []*index.Document) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_BULK_INDEX, - Documents: docs, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmBulkIndexResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return 
f.Response().(*fsmBulkIndexResponse).count, nil -} - -func (s *RaftServer) BulkDelete(ids []string) (int, error) { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return -1, raft.ErrNotLeader - } - - proposal := &index.Proposal{ - Event: index.Proposal_BULK_DELETE, - Ids: ids, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - err = f.Response().(*fsmBulkDeleteResponse).error - if err != nil { - s.logger.Error(err.Error()) - return -1, err - } - - return f.Response().(*fsmBulkDeleteResponse).count, nil -} - -func (s *RaftServer) Search(request *bleve.SearchRequest) (*bleve.SearchResult, error) { - result, err := s.fsm.Search(request) - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return result, nil -} - -func (s *RaftServer) GetIndexConfig() (map[string]interface{}, error) { - indexConfig, err := s.fsm.GetIndexConfig() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexConfig, nil -} - -func (s *RaftServer) GetIndexStats() (map[string]interface{}, error) { - indexStats, err := s.fsm.GetIndexStats() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return indexStats, nil -} diff --git a/indexer/server.go b/indexer/server.go deleted file mode 100644 index dbea38b..0000000 --- a/indexer/server.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package indexer - -import ( - "encoding/json" - "fmt" - - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/indexutils" - - "github.com/mosuka/blast/protobuf/management" - - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/empty" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/manager" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "go.uber.org/zap" -) - -type Server struct { - managerGrpcAddress string - shardId string - peerGrpcAddress string - node *index.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - managerGrpcAddress: managerGrpcAddress, - shardId: shardId, - peerGrpcAddress: peerGrpcAddress, - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - grpcLogger: 
grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - // get peer from manager - if s.managerGrpcAddress != "" { - s.logger.Info("connect to manager", zap.String("manager_grpc_addr", s.managerGrpcAddress)) - - mc, err := manager.NewGRPCClient(s.managerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &management.GetRequest{ - Key: fmt.Sprintf("cluster/shards/%s", s.shardId), - } - res, err := mc.Get(req) - if err != nil && err != blasterrors.ErrNotFound { - s.logger.Fatal(err.Error()) - return - } - value, err := protobuf.MarshalAny(res.Value) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - if value != nil { - nodes := *value.(*map[string]interface{}) - nodesBytes, err := json.Marshal(nodes) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - var cluster *index.Cluster - err = json.Unmarshal(nodesBytes, &cluster) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - for id, node := range cluster.Nodes { - if id == s.node.Id { - s.logger.Debug("skip own node id", zap.String("id", id)) - continue - } - - s.logger.Info("peer node detected", zap.String("peer_grpc_addr", node.Metadata.GrpcAddress)) - s.peerGrpcAddress = node.Metadata.GrpcAddress - break - } - } - } - - //get index config from manager or peer - if s.managerGrpcAddress != "" { - mc, err := manager.NewGRPCClient(s.managerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", mc.GetAddress())) - err = mc.Close() - if err != nil { - s.logger.Error(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - s.logger.Debug("pull index config from manager", zap.String("address", mc.GetAddress())) - req := &management.GetRequest{ - Key: "/index_config", - } - resp, err := 
mc.Get(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - value, err := protobuf.MarshalAny(resp.Value) - if value != nil { - indexConfigMap := *value.(*map[string]interface{}) - indexMappingSrc, ok := indexConfigMap["index_mapping"].(map[string]interface{}) - if ok { - indexMappingBytes, err := json.Marshal(indexMappingSrc) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - s.indexMapping, err = indexutils.NewIndexMappingFromBytes(indexMappingBytes) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } - indexTypeSrc, ok := indexConfigMap["index_type"] - if ok { - s.indexType = indexTypeSrc.(string) - } - indexStorageTypeSrc, ok := indexConfigMap["index_storage_type"] - if ok { - s.indexStorageType = indexStorageTypeSrc.(string) - } - } - } else if s.peerGrpcAddress != "" { - pc, err := NewGRPCClient(s.peerGrpcAddress) - defer func() { - s.logger.Debug("close client", zap.String("address", pc.GetAddress())) - err = pc.Close() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - s.logger.Debug("pull index config from cluster peer", zap.String("address", pc.GetAddress())) - req := &empty.Empty{} - res, err := pc.GetIndexConfig(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - indexMapping, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) - s.indexMapping = indexMapping.(*mapping.IndexMappingImpl) - s.indexType = res.IndexConfig.IndexType - s.indexStorageType = res.IndexConfig.IndexStorageType - } - - // bootstrap node? 
- bootstrap := s.peerGrpcAddress == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - var err error - - // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.shardId, s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if 
!bootstrap { - client, err := NewGRPCClient(s.peerGrpcAddress) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &index.ClusterJoinRequest{ - Node: s.node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} diff --git a/indexer/server_test.go b/indexer/server_test.go deleted file mode 100644 index 7563ed3..0000000 --- a/indexer/server_test.go +++ /dev/null @@ -1,2177 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexer - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/index" - "github.com/mosuka/blast/strutils" - "github.com/mosuka/blast/testutils" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - 
server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_LivenessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // healthiness - reqHealthiness := 
&index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - resHealthiness, err := client.NodeHealthCheck(reqHealthiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthinessState := index.NodeHealthCheckResponse_HEALTHY - actHealthinessState := resHealthiness.State - if expHealthinessState != actHealthinessState { - t.Fatalf("expected content to see %v, saw %v", expHealthinessState, actHealthinessState) - } - - // liveness - reqLiveness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - resLiveness, err := client.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLivenessState := index.NodeHealthCheckResponse_ALIVE - actLivenessState := resLiveness.State - if expLivenessState != actLivenessState { - t.Fatalf("expected content to see %v, saw %v", expLivenessState, actLivenessState) - } - - // readiness - reqReadiness := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - resReadiness, err := client.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadinessState := index.NodeHealthCheckResponse_READY - actReadinessState := resReadiness.State - if expReadinessState != actReadinessState { - t.Fatalf("expected content to see %v, saw %v", expReadinessState, actReadinessState) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := 
testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - req := &empty.Empty{} - res, err := client.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNodeInfo := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - actNodeInfo := res.Node - if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - 
peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - req := &empty.Empty{} - res, err := client.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId: { - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - }, - }, - } - actCluster := res.Cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Fatalf("expected 
content to see %v, saw %v", expCluster, actCluster) - } -} - -func TestServer_GetIndexMapping(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexMapping := indexMapping - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - 
t.Fatalf("%v", err) - } - - im, err := protobuf.MarshalAny(res.IndexConfig.IndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - actIndexMapping := im.(*mapping.IndexMappingImpl) - - exp, err := json.Marshal(expIndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - act, err := json.Marshal(actIndexMapping) - if err != nil { - t.Fatalf("%v", err) - } - - if !reflect.DeepEqual(exp, act) { - t.Fatalf("expected content to see %v, saw %v", exp, act) - } -} - -func TestServer_GetIndexType(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep 
- time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexType := indexType - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - t.Fatalf("%v", err) - } - - actIndexType := res.IndexConfig.IndexType - - if !reflect.DeepEqual(expIndexType, actIndexType) { - t.Fatalf("expected content to see %v, saw %v", expIndexType, actIndexType) - } -} - -func TestServer_GetIndexStorageType(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, 
httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStorageType := indexStorageType - - req := &empty.Empty{} - res, err := client.GetIndexConfig(req) - if err != nil { - t.Fatalf("%v", err) - } - - actIndexStorageType := res.IndexConfig.IndexStorageType - - if !reflect.DeepEqual(expIndexStorageType, actIndexStorageType) { - t.Fatalf("expected content to see %v, saw %v", expIndexStorageType, actIndexStorageType) - } -} - -func TestServer_GetIndexStats(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - 
indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - expIndexStats := map[string]interface{}{ - "index": map[string]interface{}{ - "analysis_time": float64(0), - "batches": float64(0), - "deletes": float64(0), - "errors": float64(0), - "index_time": float64(0), - "num_plain_text_bytes_indexed": float64(0), - "term_searchers_finished": float64(0), - "term_searchers_started": float64(0), - "updates": float64(0), - }, - "search_time": float64(0), - "searches": float64(0), - } - - req := &empty.Empty{} - res, err := client.GetIndexStats(req) - if err != nil { - t.Fatalf("%v", err) - } - - is, err := protobuf.MarshalAny(res.IndexStats) - if err != nil { - t.Fatalf("%v", err) - } - actIndexStats := *is.(*map[string]interface{}) - - if !reflect.DeepEqual(expIndexStats, actIndexStats) { - t.Fatalf("expected content to see %v, saw %v", expIndexStats, actIndexStats) - } -} - -func TestServer_Index(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := 
fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - req := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(req) - if err != nil { - t.Fatalf("%v", err) - } -} - -func TestServer_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := 
logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = 
index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // get non-existing document - getReq2 := &index.GetRequest{Id: "non-existing"} - getRes2, err := client.Get(getReq2) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } - if getRes2 != nil { - t.Fatalf("expected content to see nil, saw %v", getRes2) - } -} - -func TestServer_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - 
HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // delete document - deleteReq := &index.DeleteRequest{Id: "enwiki_1"} - _, err = 
client.Delete(deleteReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document again - getRes, err = client.Get(getReq) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } - if getRes != nil { - t.Fatalf("expected content to see nil, saw %v", getRes) - } - - // delete non-existing document - deleteReq2 := &index.DeleteRequest{Id: "non-existing"} - _, err = client.Delete(deleteReq2) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - // noop - default: - t.Fatalf("%v", err) - } - } -} - -func TestServer_Search(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress := "" - shardId := "" - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &index.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, 
logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // index document - docPath1 := filepath.Join(curDir, "../example/wiki_doc_enwiki_1.json") - docFile1, err := os.Open(docPath1) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = docFile1.Close() - }() - docBytes1, err := ioutil.ReadAll(docFile1) - if err != nil { - t.Fatalf("%v", err) - } - doc1 := &index.Document{} - err = index.UnmarshalDocument(docBytes1, doc1) - if err != nil { - t.Fatalf("%v", err) - } - indexReq := &index.IndexRequest{ - Id: doc1.Id, - Fields: doc1.Fields, - } - _, err = client.Index(indexReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get document - getReq := &index.GetRequest{Id: "enwiki_1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - expFields, err := protobuf.MarshalAny(doc1.Fields) - if err != nil { - t.Fatalf("%v", err) - } - actFields, err := protobuf.MarshalAny(getRes.Fields) - if err != nil { - t.Fatalf("%v", err) - } - if !cmp.Equal(expFields, actFields) { - t.Fatalf("expected content to see %v, saw %v", expFields, actFields) - } - - // search - searchRequestPath := filepath.Join(curDir, "../example/wiki_search_request.json") - searchRequestFile, err := os.Open(searchRequestPath) - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - _ = searchRequestFile.Close() - }() - searchRequestByte, err := ioutil.ReadAll(searchRequestFile) - if err != nil { - t.Fatalf("%v", err) - } - - searchReq := &index.SearchRequest{} - marshaler := JsonMarshaler{} - err = marshaler.Unmarshal(searchRequestByte, searchReq) - if err != nil { - 
t.Fatalf("%v", err) - } - searchRes, err := client.Search(searchReq) - if err != nil { - t.Fatalf("%v", err) - } - searchResult, err := protobuf.MarshalAny(searchRes.SearchResult) - if err != nil { - t.Fatalf("%v", err) - } - expTotal := uint64(1) - actTotal := searchResult.(*bleve.SearchResult).Total - if expTotal != actTotal { - t.Fatalf("expected content to see %v, saw %v", expTotal, actTotal) - } -} - -func TestCluster_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" 
- peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := 
indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - 
defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - 
Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - healthinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_HEALTHINESS} - livenessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_LIVENESS} - readinessReq := &index.NodeHealthCheckRequest{Probe: index.NodeHealthCheckRequest_READINESS} - - // healthiness - healthinessRes1, err := client1.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness1 := healthinessRes1.State - if expHealthiness1 != actHealthiness1 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) - } - - // liveness - livenessRes1, err := client1.NodeHealthCheck(livenessReq) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness1 := 
index.NodeHealthCheckResponse_ALIVE - actLiveness1 := livenessRes1.State - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // readiness - readinessRes1, err := client1.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness1 := index.NodeHealthCheckResponse_READY - actReadiness1 := readinessRes1.State - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) - } - - // healthiness - healthinessRes2, err := client2.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness2 := healthinessRes2.State - if expHealthiness2 != actHealthiness2 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) - } - - // liveness - livenessRes2, err := client2.NodeHealthCheck(livenessReq) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness2 := index.NodeHealthCheckResponse_ALIVE - actLiveness2 := livenessRes2.State - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // readiness - readinessRes2, err := client2.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness2 := index.NodeHealthCheckResponse_READY - actReadiness2 := readinessRes2.State - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // healthiness - healthinessRes3, err := client3.NodeHealthCheck(healthinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY - actHealthiness3 := healthinessRes3.State - if expHealthiness3 != actHealthiness3 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) - } - - // liveness - livenessRes3, err := client3.NodeHealthCheck(livenessReq) - if err 
!= nil { - t.Fatalf("%v", err) - } - expLiveness3 := index.NodeHealthCheckResponse_ALIVE - actLiveness3 := livenessRes3.State - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } - - // readiness - readinessRes3, err := client3.NodeHealthCheck(readinessReq) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness3 := index.NodeHealthCheckResponse_READY - actReadiness3 := readinessRes3.State - if expReadiness3 != actReadiness3 { - t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, 
httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: 
index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get all node info from all nodes - node11, err := client1.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode11 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - actNode11 := node11.Node - if !reflect.DeepEqual(expNode11, actNode11) { - t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) - } - - node21, err := client2.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode21 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: 
grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - actNode21 := node21.Node - if !reflect.DeepEqual(expNode21, actNode21) { - t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) - } - - node31, err := client3.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNode31 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - actNode31 := node31.Node - if !reflect.DeepEqual(expNode31, actNode31) { - t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - managerGrpcAddress1 := "" - shardId1 := "" - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &index.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - server1, err := 
NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - server1.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - managerGrpcAddress2 := "" - shardId2 := "" - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &index.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - server2.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - managerGrpcAddress3 := "" - shardId3 := "" - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer 
func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &index.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_UNKNOWN, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - server3.Stop() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster info from manager1 - cluster1, err := client1.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: 
grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster1 := cluster1.Cluster - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) - } - - cluster2, err := client2.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster2 := cluster2.Cluster - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - cluster3, err := client3.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := &index.Cluster{ - Nodes: map[string]*index.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: index.Node_LEADER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - 
GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: index.Node_FOLLOWER, - Metadata: &index.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster3 := cluster3.Cluster - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} diff --git a/logutils/logger.go b/log/log.go similarity index 54% rename from logutils/logger.go rename to log/log.go index 28611dd..5470fdf 100644 --- a/logutils/logger.go +++ b/log/log.go @@ -1,22 +1,10 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logutils +package log import ( "os" + "strconv" + accesslog "github.com/mash/go-accesslog" "github.com/natefinch/lumberjack" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -42,9 +30,12 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup } var ws zapcore.WriteSyncer - if logFilename == "" { + switch logFilename { + case "", os.Stderr.Name(): ws = zapcore.AddSync(os.Stderr) - } else { + case os.Stdout.Name(): + ws = zapcore.AddSync(os.Stdout) + default: ws = zapcore.AddSync( &lumberjack.Logger{ Filename: logFilename, @@ -74,7 +65,43 @@ func NewLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackup ), zap.AddCaller(), //zap.AddStacktrace(ll), - ) + ).Named("blast") return logger } + +type HTTPLogger struct { + Logger *zap.Logger +} + +func (l HTTPLogger) Log(record accesslog.LogRecord) { + // Output log that formatted Apache combined. + size := "-" + if record.Size > 0 { + size = strconv.FormatInt(record.Size, 10) + } + + referer := "-" + if record.RequestHeader.Get("Referer") != "" { + referer = record.RequestHeader.Get("Referer") + } + + userAgent := "-" + if record.RequestHeader.Get("User-Agent") != "" { + userAgent = record.RequestHeader.Get("User-Agent") + } + + l.Logger.Info( + "", + zap.String("ip", record.Ip), + zap.String("username", record.Username), + zap.String("time", record.Time.Format("02/Jan/2006 03:04:05 +0000")), + zap.String("method", record.Method), + zap.String("uri", record.Uri), + zap.String("protocol", record.Protocol), + zap.Int("status", record.Status), + zap.String("size", size), + zap.String("referer", referer), + zap.String("user_agent", userAgent), + ) +} diff --git a/logutils/grpc_logger.go b/logutils/grpc_logger.go deleted file mode 100644 index 85d6fa9..0000000 --- a/logutils/grpc_logger.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "os" - - "github.com/natefinch/lumberjack" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewGRPCLogger(logLevel string, logFilename string, logMaxSize int, logMaxBackups int, logMaxAge int, logCompress bool) *zap.Logger { - var ll zapcore.Level - switch logLevel { - case "DEBUG": - ll = zap.DebugLevel - case "INFO": - ll = zap.InfoLevel - case "WARN", "WARNING": - ll = zap.WarnLevel - case "ERR", "ERROR": - ll = zap.WarnLevel - case "DPANIC": - ll = zap.DPanicLevel - case "PANIC": - ll = zap.PanicLevel - case "FATAL": - ll = zap.FatalLevel - } - - var ws zapcore.WriteSyncer - if logFilename == "" { - ws = zapcore.AddSync(os.Stderr) - } else { - ws = zapcore.AddSync( - &lumberjack.Logger{ - Filename: logFilename, - MaxSize: logMaxSize, // megabytes - MaxBackups: logMaxBackups, - MaxAge: logMaxAge, // days - Compress: logCompress, - }, - ) - } - - ec := zap.NewProductionEncoderConfig() - ec.TimeKey = "_timestamp_" - ec.LevelKey = "_level_" - ec.NameKey = "_name_" - ec.CallerKey = "_caller_" - ec.MessageKey = "_message_" - ec.StacktraceKey = "_stacktrace_" - ec.EncodeTime = zapcore.ISO8601TimeEncoder - ec.EncodeCaller = zapcore.ShortCallerEncoder - - logger := zap.New( - zapcore.NewCore( - zapcore.NewJSONEncoder(ec), - ws, - ll, - ), - //zap.AddCaller(), - //zap.AddStacktrace(ll), - ) - - return logger -} diff --git a/logutils/http_logger.go b/logutils/http_logger.go deleted file mode 100644 index bb4371f..0000000 --- a/logutils/http_logger.go +++ /dev/null @@ -1,90 +0,0 @@ 
-// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutils - -import ( - "io" - "log" - "os" - "strconv" - - accesslog "github.com/mash/go-accesslog" - "github.com/natefinch/lumberjack" -) - -func NewFileWriter(filename string, maxSize int, maxBackups int, maxAge int, compress bool) io.Writer { - var writer io.Writer - - switch filename { - case "", os.Stderr.Name(): - writer = os.Stderr - case os.Stdout.Name(): - writer = os.Stdout - default: - writer = &lumberjack.Logger{ - Filename: filename, - MaxSize: maxSize, // megabytes - MaxBackups: maxBackups, - MaxAge: maxAge, // days - Compress: compress, // disabled by default - } - } - - return writer -} - -type ApacheCombinedLogger struct { - logger *log.Logger -} - -func NewApacheCombinedLogger(filename string, maxSize int, maxBackups int, maxAge int, compress bool) *ApacheCombinedLogger { - writer := NewFileWriter(filename, maxSize, maxBackups, maxAge, compress) - return &ApacheCombinedLogger{ - logger: log.New(writer, "", 0), - } -} - -func (l ApacheCombinedLogger) Log(record accesslog.LogRecord) { - // Output log that formatted Apache combined. 
- size := "-" - if record.Size > 0 { - size = strconv.FormatInt(record.Size, 10) - } - - referer := "-" - if record.RequestHeader.Get("Referer") != "" { - referer = record.RequestHeader.Get("Referer") - } - - userAgent := "-" - if record.RequestHeader.Get("User-Agent") != "" { - userAgent = record.RequestHeader.Get("User-Agent") - } - - l.logger.Printf( - "%s - %s [%s] \"%s %s %s\" %d %s \"%s\" \"%s\" %.4f", - record.Ip, - record.Username, - record.Time.Format("02/Jan/2006 03:04:05 +0000"), - record.Method, - record.Uri, - record.Protocol, - record.Status, - size, - referer, - userAgent, - record.ElapsedTime.Seconds(), - ) -} diff --git a/main.go b/main.go new file mode 100644 index 0000000..3ad98ef --- /dev/null +++ b/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "os" + + "github.com/mosuka/blast/cmd" +) + +func main() { + if err := cmd.Execute(); err != nil { + os.Exit(1) + } + + os.Exit(0) +} diff --git a/manager/grpc_client.go b/manager/grpc_client.go deleted file mode 100644 index 4d732a4..0000000 --- a/manager/grpc_client.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "math" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mosuka/blast/protobuf/management" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCClient struct { - ctx context.Context - cancel context.CancelFunc - conn *grpc.ClientConn - client management.ManagementClient -} - -func NewGRPCContext() (context.Context, context.CancelFunc) { - baseCtx := context.TODO() - //return context.WithTimeout(baseCtx, 60*time.Second) - return context.WithCancel(baseCtx) -} - -func NewGRPCClient(address string) (*GRPCClient, error) { - ctx, cancel := NewGRPCContext() - - //streamRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.Disable(), - //} - - //unaryRetryOpts := []grpc_retry.CallOption{ - // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), - // grpc_retry.WithCodes(codes.Unavailable), - // grpc_retry.WithMax(100), - //} - - dialOpts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions( - grpc.MaxCallSendMsgSize(math.MaxInt32), - grpc.MaxCallRecvMsgSize(math.MaxInt32), - ), - //grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(streamRetryOpts...)), - //grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(unaryRetryOpts...)), - } - - conn, err := grpc.DialContext(ctx, address, dialOpts...) 
- if err != nil { - return nil, err - } - - return &GRPCClient{ - ctx: ctx, - cancel: cancel, - conn: conn, - client: management.NewManagementClient(conn), - }, nil -} - -func (c *GRPCClient) Cancel() { - c.cancel() -} - -func (c *GRPCClient) Close() error { - c.Cancel() - if c.conn != nil { - return c.conn.Close() - } - - return c.ctx.Err() -} - -func (c *GRPCClient) GetAddress() string { - return c.conn.Target() -} - -func (c *GRPCClient) NodeHealthCheck(req *management.NodeHealthCheckRequest, opts ...grpc.CallOption) (*management.NodeHealthCheckResponse, error) { - return c.client.NodeHealthCheck(c.ctx, req, opts...) -} - -func (c *GRPCClient) NodeInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.NodeInfoResponse, error) { - return c.client.NodeInfo(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterJoin(req *management.ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterJoin(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterLeave(req *management.ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.ClusterLeave(c.ctx, req, opts...) -} - -func (c *GRPCClient) ClusterInfo(req *empty.Empty, opts ...grpc.CallOption) (*management.ClusterInfoResponse, error) { - return c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) -} - -func (c *GRPCClient) ClusterWatch(req *empty.Empty, opts ...grpc.CallOption) (management.Management_ClusterWatchClient, error) { - return c.client.ClusterWatch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Get(req *management.GetRequest, opts ...grpc.CallOption) (*management.GetResponse, error) { - res, err := c.client.Get(c.ctx, req, opts...) 
- if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - return &management.GetResponse{}, nil - default: - return nil, err - } - } - return res, nil -} - -func (c *GRPCClient) Set(req *management.SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Set(c.ctx, req, opts...) -} - -func (c *GRPCClient) Delete(req *management.DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - res, err := c.client.Delete(c.ctx, req, opts...) - if err != nil { - st, _ := status.FromError(err) - switch st.Code() { - case codes.NotFound: - return &empty.Empty{}, nil - default: - return nil, err - } - } - return res, nil -} - -func (c *GRPCClient) Watch(req *management.WatchRequest, opts ...grpc.CallOption) (management.Management_WatchClient, error) { - return c.client.Watch(c.ctx, req, opts...) -} - -func (c *GRPCClient) Snapshot(req *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - return c.client.Snapshot(c.ctx, &empty.Empty{}) -} diff --git a/manager/grpc_gateway.go b/manager/grpc_gateway.go deleted file mode 100644 index 3f505d4..0000000 --- a/manager/grpc_gateway.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type JsonMarshaler struct{} - -// ContentType always Returns "application/json". -func (*JsonMarshaler) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JsonMarshaler) Marshal(v interface{}) ([]byte, error) { - switch v.(type) { - case *management.GetResponse: - value, err := protobuf.MarshalAny(v.(*management.GetResponse).Value) - if err != nil { - return nil, err - } - return json.Marshal( - map[string]interface{}{ - "value": value, - }, - ) - default: - return json.Marshal(v) - } -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JsonMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JsonMarshaler) NewDecoder(r io.Reader) runtime.Decoder { - return runtime.DecoderFunc( - func(v interface{}) error { - buffer, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - switch v.(type) { - case *management.SetRequest: - var tmpValue map[string]interface{} - err = json.Unmarshal(buffer, &tmpValue) - if err != nil { - return err - } - value, ok := tmpValue["value"] - if !ok { - return errors.New("value does not exist") - } - v.(*management.SetRequest).Value = &any.Any{} - return protobuf.UnmarshalAny(value, v.(*management.SetRequest).Value) - default: - return json.Unmarshal(buffer, v) - } - }, - ) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JsonMarshaler) NewEncoder(w io.Writer) runtime.Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *JsonMarshaler) Delimiter() []byte { - return []byte("\n") -} - -type GRPCGateway struct { - grpcGatewayAddr string - grpcAddr string - logger *zap.Logger - - ctx context.Context - cancel context.CancelFunc - listener net.Listener -} - -func NewGRPCGateway(grpcGatewayAddr string, grpcAddr string, logger *zap.Logger) (*GRPCGateway, error) { - return &GRPCGateway{ - grpcGatewayAddr: grpcGatewayAddr, - grpcAddr: grpcAddr, - logger: logger, - }, nil -} - -func (s *GRPCGateway) Start() error { - s.ctx, s.cancel = NewGRPCContext() - - mux := runtime.NewServeMux( - runtime.WithMarshalerOption("application/json", new(JsonMarshaler)), - ) - opts := []grpc.DialOption{grpc.WithInsecure()} - - err := management.RegisterManagementHandlerFromEndpoint(s.ctx, mux, s.grpcAddr, opts) - if err != nil { - return err - } - - s.listener, err = net.Listen("tcp", s.grpcGatewayAddr) - if err != nil { - return err - } - - err = http.Serve(s.listener, mux) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) Stop() error { - defer s.cancel() - - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} - -func (s *GRPCGateway) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/grpc_server.go b/manager/grpc_server.go deleted file mode 100644 index 8d17486..0000000 --- a/manager/grpc_server.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "fmt" - "net" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc" - //grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - //grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - //grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - //grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" -) - -type GRPCServer struct { - service management.ManagementServer - server *grpc.Server - listener net.Listener - - logger *zap.Logger -} - -func NewGRPCServer(grpcAddr string, service management.ManagementServer, logger *zap.Logger) (*GRPCServer, error) { - server := grpc.NewServer( - grpc.StreamInterceptor( - grpc_middleware.ChainStreamServer( - //grpc_ctxtags.StreamServerInterceptor(), - //grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(logger), - //grpc_auth.StreamServerInterceptor(myAuthFunction), - //grpc_recovery.StreamServerInterceptor(), - ), - ), - grpc.UnaryInterceptor( - grpc_middleware.ChainUnaryServer( - //grpc_ctxtags.UnaryServerInterceptor(), - //grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(logger), - //grpc_auth.UnaryServerInterceptor(myAuthFunction), - 
//grpc_recovery.UnaryServerInterceptor(), - ), - ), - ) - - management.RegisterManagementServer(server, service) - - grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(server) - - listener, err := net.Listen("tcp", grpcAddr) - if err != nil { - return nil, err - } - - return &GRPCServer{ - service: service, - server: server, - listener: listener, - logger: logger, - }, nil -} - -func (s *GRPCServer) Start() error { - s.logger.Info("start server") - err := s.server.Serve(s.listener) - if err != nil { - return err - } - - return nil -} - -func (s *GRPCServer) Stop() error { - s.logger.Info("stop server") - s.server.Stop() - //s.server.GracefulStop() - - return nil -} - -func (s *GRPCServer) GetAddress() (string, error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", s.listener.Addr().String()) - if err != nil { - return "", err - } - - v4Addr := "" - if tcpAddr.IP.To4() != nil { - v4Addr = tcpAddr.IP.To4().String() - } - port := tcpAddr.Port - - return fmt.Sprintf("%s:%d", v4Addr, port), nil -} diff --git a/manager/grpc_service.go b/manager/grpc_service.go deleted file mode 100644 index c79f7ad..0000000 --- a/manager/grpc_service.go +++ /dev/null @@ -1,714 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "context" - "encoding/json" - "errors" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type GRPCService struct { - raftServer *RaftServer - logger *zap.Logger - - updateClusterStopCh chan struct{} - updateClusterDoneCh chan struct{} - peers *management.Cluster - peerClients map[string]*GRPCClient - cluster *management.Cluster - clusterChans map[chan management.ClusterWatchResponse]struct{} - clusterMutex sync.RWMutex - - stateChans map[chan management.WatchResponse]struct{} - stateMutex sync.RWMutex -} - -func NewGRPCService(raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { - return &GRPCService{ - raftServer: raftServer, - logger: logger, - - peers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - peerClients: make(map[string]*GRPCClient, 0), - cluster: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, - clusterChans: make(map[chan management.ClusterWatchResponse]struct{}), - - stateChans: make(map[chan management.WatchResponse]struct{}), - }, nil -} - -func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) - - return nil -} - -func (s *GRPCService) Stop() error { - s.logger.Info("stop to update cluster info") - s.stopUpdateCluster() - - return nil -} - -func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - //leaderId, err := s.raftServer.LeaderID(10 * time.Second) - //if err != nil { - // return nil, err - //} - //client, exist := s.peerClients[string(leaderId)] - //if !exist { - // err := errors.New("there is no client for leader") - // 
s.logger.Error(err.Error()) - // return nil, err - //} - //return client, nil - - for id, node := range s.cluster.Nodes { - switch node.State { - case management.Node_LEADER: - } - if client, exist := s.peerClients[id]; exist { - return client, nil - } - } - - err := errors.New("there is no client for leader") - s.logger.Error(err.Error()) - return nil, err -} - -func (s *GRPCService) cloneCluster(cluster *management.Cluster) (*management.Cluster, error) { - b, err := json.Marshal(cluster) - if err != nil { - return nil, err - } - - var clone *management.Cluster - err = json.Unmarshal(b, &clone) - if err != nil { - return nil, err - } - - return clone, nil -} - -func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { - s.updateClusterStopCh = make(chan struct{}) - s.updateClusterDoneCh = make(chan struct{}) - - defer func() { - close(s.updateClusterDoneCh) - }() - - ticker := time.NewTicker(checkInterval) - defer ticker.Stop() - - savedCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - for { - select { - case <-s.updateClusterStopCh: - s.logger.Info("received a request to stop updating a cluster") - return - case <-ticker.C: - s.cluster, err = s.getCluster() - if err != nil { - s.logger.Error(err.Error()) - return - } - - snapshotCluster, err := s.cloneCluster(s.cluster) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create peer node list with out self node - for id, node := range snapshotCluster.Nodes { - if id != s.NodeID() { - s.peers.Nodes[id] = node - } - } - - // open clients for peer nodes - for id, node := range s.peers.Nodes { - if node.Metadata.GrpcAddress == "" { - s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - client, exist := s.peerClients[id] - if exist { - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Info("recreate gRPC client", zap.String("id", id), 
zap.String("grpc_addr", node.Metadata.GrpcAddress)) - delete(s.peerClients, id) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id)) - } - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } else { - s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - s.peerClients[id] = newClient - } - } - - // close clients for non-existent peer nodes - for id, client := range s.peerClients { - if _, exist := s.peers.Nodes[id]; !exist { - s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) - } - delete(s.peerClients, id) - } - } - - // check joined and updated nodes - for id, node := range snapshotCluster.Nodes { - nodeSnapshot, exist := savedCluster.Nodes[id] - if exist { - // node exists in the cluster - n1, err := json.Marshal(node) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) - continue - } - n2, err := json.Marshal(nodeSnapshot) - if err != nil { - s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) - continue - } - if !cmp.Equal(n1, n2) { - // node updated - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_UPDATE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } else { - // node joined - // notify the cluster 
changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_JOIN, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - // check left nodes - for id, node := range savedCluster.Nodes { - if _, exist := snapshotCluster.Nodes[id]; !exist { - // node left - // notify the cluster changes - clusterResp := &management.ClusterWatchResponse{ - Event: management.ClusterWatchResponse_LEAVE, - Node: node, - Cluster: snapshotCluster, - } - for c := range s.clusterChans { - c <- *clusterResp - } - } - } - - savedCluster = snapshotCluster - default: - time.Sleep(100 * time.Millisecond) - } - } -} - -func (s *GRPCService) stopUpdateCluster() { - s.logger.Info("close all peer clients") - for id, client := range s.peerClients { - s.logger.Debug("close peer client", zap.String("id", id), zap.String("address", client.GetAddress())) - err := client.Close() - if err != nil { - s.logger.Warn(err.Error()) - } - } - - if s.updateClusterStopCh != nil { - s.logger.Info("send a request to stop updating a cluster") - close(s.updateClusterStopCh) - } - - s.logger.Info("wait for the cluster update to stop") - <-s.updateClusterDoneCh - s.logger.Info("the cluster update has been stopped") -} - -func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *management.NodeHealthCheckRequest) (*management.NodeHealthCheckResponse, error) { - resp := &management.NodeHealthCheckResponse{} - - switch req.Probe { - case management.NodeHealthCheckRequest_UNKNOWN: - fallthrough - case management.NodeHealthCheckRequest_HEALTHINESS: - resp.State = management.NodeHealthCheckResponse_HEALTHY - case management.NodeHealthCheckRequest_LIVENESS: - resp.State = management.NodeHealthCheckResponse_ALIVE - case management.NodeHealthCheckRequest_READINESS: - resp.State = management.NodeHealthCheckResponse_READY - default: - err := errors.New("unknown probe") - s.logger.Error(err.Error()) - return resp, 
status.Error(codes.InvalidArgument, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) NodeID() string { - return s.raftServer.NodeID() -} - -func (s *GRPCService) getSelfNode() *management.Node { - node := s.raftServer.node - - switch s.raftServer.State() { - case raft.Follower: - node.State = management.Node_FOLLOWER - case raft.Candidate: - node.State = management.Node_CANDIDATE - case raft.Leader: - node.State = management.Node_LEADER - case raft.Shutdown: - node.State = management.Node_SHUTDOWN - default: - node.State = management.Node_UNKNOWN - } - - return node -} - -func (s *GRPCService) getPeerNode(id string) (*management.Node, error) { - if _, exist := s.peerClients[id]; !exist { - err := errors.New("node does not exist in peers") - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - - req := &empty.Empty{} - resp, err := s.peerClients[id].NodeInfo(req) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return &management.Node{ - BindAddress: "", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "", - HttpAddress: "", - }, - }, nil - } - - return resp.Node, nil -} - -func (s *GRPCService) getNode(id string) (*management.Node, error) { - if id == "" || id == s.NodeID() { - return s.getSelfNode(), nil - } else { - return s.getPeerNode(id) - } -} - -func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*management.NodeInfoResponse, error) { - resp := &management.NodeInfoResponse{} - - node, err := s.getNode(s.NodeID()) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return &management.NodeInfoResponse{ - Node: node, - }, nil -} - -func (s *GRPCService) setNode(node *management.Node) error { - if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(node) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := 
s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &management.ClusterJoinRequest{ - Node: node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.setNode(req.Node) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) deleteNode(id string) error { - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteNode(id) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - req := &management.ClusterLeaveRequest{ - Id: id, - } - - _, err = client.ClusterLeave(req) - if err != nil { - s.logger.Error(err.Error()) - return err - } - } - - return nil -} - -func (s *GRPCService) ClusterLeave(ctx context.Context, req *management.ClusterLeaveRequest) (*empty.Empty, error) { - resp := &empty.Empty{} - - err := s.deleteNode(req.Id) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} - -func (s *GRPCService) getCluster() (*management.Cluster, error) { - cluster, err := s.raftServer.GetCluster() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - // update latest node state - for id := range cluster.Nodes { - node, err := s.getNode(id) - if err != nil { - s.logger.Debug(err.Error()) - continue - } - cluster.Nodes[id] = node - } - - return cluster, nil -} - -func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*management.ClusterInfoResponse, error) { - resp := &management.ClusterInfoResponse{} - - cluster, err := s.getCluster() - if err 
!= nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = cluster - - return resp, nil -} - -func (s *GRPCService) ClusterWatch(req *empty.Empty, server management.Management_ClusterWatchServer) error { - chans := make(chan management.ClusterWatchResponse) - - s.clusterMutex.Lock() - s.clusterChans[chans] = struct{}{} - s.clusterMutex.Unlock() - - defer func() { - s.clusterMutex.Lock() - delete(s.clusterChans, chans) - s.clusterMutex.Unlock() - close(chans) - }() - - for resp := range chans { - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Get(ctx context.Context, req *management.GetRequest) (*management.GetResponse, error) { - s.stateMutex.RLock() - defer func() { - s.stateMutex.RUnlock() - }() - - resp := &management.GetResponse{} - - value, err := s.raftServer.GetValue(req.Key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny(value, valueAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Value = valueAny - - return resp, nil -} - -func (s *GRPCService) Set(ctx context.Context, req *management.SetRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - if s.raftServer.IsLeader() { - value, err := protobuf.MarshalAny(req.Value) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - err = s.raftServer.SetValue(req.Key, value) - if err != nil { - 
s.logger.Error(err.Error()) - switch err { - case blasterrors.ErrNotFound: - return resp, status.Error(codes.NotFound, err.Error()) - default: - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Set(req) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } - - // notify - for c := range s.stateChans { - c <- management.WatchResponse{ - Command: management.WatchResponse_SET, - Key: req.Key, - Value: req.Value, - } - } - - return resp, nil -} - -func (s *GRPCService) Delete(ctx context.Context, req *management.DeleteRequest) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - if s.raftServer.IsLeader() { - err := s.raftServer.DeleteValue(req.Key) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - } else { - // forward to leader - client, err := s.getLeaderClient() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - resp, err = client.Delete(req) - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.NotFound, err.Error()) - default: - s.logger.Error(err.Error(), zap.String("key", req.Key)) - return resp, status.Error(codes.Internal, err.Error()) - } - } - } - - // notify - for c := range s.stateChans { - c <- management.WatchResponse{ - Command: management.WatchResponse_DELETE, - Key: req.Key, - } - } - - 
return resp, nil -} - -func (s *GRPCService) Watch(req *management.WatchRequest, server management.Management_WatchServer) error { - chans := make(chan management.WatchResponse) - - s.stateMutex.Lock() - s.stateChans[chans] = struct{}{} - s.stateMutex.Unlock() - - defer func() { - s.stateMutex.Lock() - delete(s.stateChans, chans) - s.stateMutex.Unlock() - close(chans) - }() - - // normalize key - key := func(key string) string { - keys := make([]string, 0) - for _, k := range strings.Split(key, "/") { - if k != "" { - keys = append(keys, k) - } - } - return strings.Join(keys, "/") - }(req.Key) - - for resp := range chans { - if !strings.HasPrefix(resp.Key, key) { - continue - } - err := server.Send(&resp) - if err != nil { - s.logger.Error(err.Error()) - return status.Error(codes.Internal, err.Error()) - } - } - - return nil -} - -func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - s.stateMutex.Lock() - defer func() { - s.stateMutex.Unlock() - }() - - resp := &empty.Empty{} - - err := s.raftServer.Snapshot() - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - return resp, nil -} diff --git a/manager/http_handler.go b/manager/http_handler.go deleted file mode 100644 index 0ceb447..0000000 --- a/manager/http_handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - blasthttp "github.com/mosuka/blast/http" - "github.com/mosuka/blast/version" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -type Router struct { - mux.Router - - logger *zap.Logger -} - -func NewRouter(logger *zap.Logger) (*Router, error) { - router := &Router{ - logger: logger, - } - - router.StrictSlash(true) - - router.Handle("/", NewRootHandler(logger)).Methods("GET") - router.Handle("/metrics", promhttp.Handler()).Methods("GET") - - return router, nil -} - -func (r *Router) Close() error { - return nil -} - -type RootHandler struct { - logger *zap.Logger -} - -func NewRootHandler(logger *zap.Logger) *RootHandler { - return &RootHandler{ - logger: logger, - } -} - -func (h *RootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - status := http.StatusOK - content := make([]byte, 0) - - defer blasthttp.RecordMetrics(start, status, w, r) - - msgMap := map[string]interface{}{ - "version": version.Version, - "status": status, - } - - content, err := blasthttp.NewJSONMessage(msgMap) - if err != nil { - h.logger.Error(err.Error()) - } - - blasthttp.WriteResponse(w, content, status, h.logger) -} diff --git a/manager/http_server.go b/manager/http_server.go deleted file mode 100644 index 33bd0fc..0000000 --- a/manager/http_server.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "net" - "net/http" - - accesslog "github.com/mash/go-accesslog" - "go.uber.org/zap" -) - -type HTTPServer struct { - listener net.Listener - router *Router - - logger *zap.Logger - httpLogger accesslog.Logger -} - -func NewHTTPServer(httpAddr string, router *Router, logger *zap.Logger, httpLogger accesslog.Logger) (*HTTPServer, error) { - listener, err := net.Listen("tcp", httpAddr) - if err != nil { - return nil, err - } - - return &HTTPServer{ - listener: listener, - router: router, - logger: logger, - httpLogger: httpLogger, - }, nil -} - -func (s *HTTPServer) Start() error { - err := http.Serve( - s.listener, - accesslog.NewLoggingHandler( - s.router, - s.httpLogger, - ), - ) - if err != nil { - return err - } - - return nil -} - -func (s *HTTPServer) Stop() error { - err := s.listener.Close() - if err != nil { - return err - } - - return nil -} diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go deleted file mode 100644 index bfd859f..0000000 --- a/manager/raft_fsm.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/hashicorp/raft" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" -) - -type RaftFSM struct { - path string - logger *zap.Logger - - cluster *management.Cluster - clusterMutex sync.RWMutex - - data maputils.Map - dataMutex sync.RWMutex -} - -func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { - return &RaftFSM{ - path: path, - logger: logger, - }, nil -} - -func (f *RaftFSM) Start() error { - f.logger.Info("initialize cluster") - f.cluster = &management.Cluster{Nodes: make(map[string]*management.Node, 0)} - - f.logger.Info("initialize store data") - f.data = maputils.Map{} - - return nil -} - -func (f *RaftFSM) Stop() error { - return nil -} - -func (f *RaftFSM) GetNode(nodeId string) (*management.Node, error) { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - node, ok := f.cluster.Nodes[nodeId] - if !ok { - return nil, blasterrors.ErrNotFound - } - - return node, nil -} - -func (f *RaftFSM) SetNode(node *management.Node) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - f.cluster.Nodes[node.Id] = node - - return nil -} - -func (f *RaftFSM) DeleteNode(nodeId string) error { - f.clusterMutex.RLock() - defer f.clusterMutex.RUnlock() - - if _, ok := f.cluster.Nodes[nodeId]; !ok { - return blasterrors.ErrNotFound - } - - delete(f.cluster.Nodes, nodeId) - - return nil -} - -func (f *RaftFSM) GetValue(key string) (interface{}, error) { - // get raw data - value, err := f.data.Get(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the store data", zap.String("key", key)) - return nil, blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return nil, err - } 
- } - - return value, nil -} - -func (f *RaftFSM) SetValue(key string, value interface{}, merge bool) error { - if merge { - err := f.data.Merge(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } else { - err := f.data.Set(key, value) - if err != nil { - f.logger.Error(err.Error(), zap.String("key", key), zap.Any("value", value), zap.Bool("merge", merge)) - return err - } - } - - return nil -} - -func (f *RaftFSM) DeleteValue(key string) error { - err := f.data.Delete(key) - if err != nil { - switch err { - case maputils.ErrNotFound: - f.logger.Debug("key does not found in the store data", zap.String("key", key)) - return blasterrors.ErrNotFound - default: - f.logger.Error(err.Error(), zap.String("key", key)) - return err - } - } - - return nil -} - -type fsmResponse struct { - error error -} - -func (f *RaftFSM) Apply(l *raft.Log) interface{} { - proposal := &management.Proposal{} - err := proto.Unmarshal(l.Data, proposal) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - switch proposal.Event { - case management.Proposal_SET_NODE: - err = f.SetNode(proposal.Node) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_DELETE_NODE: - err = f.DeleteNode(proposal.Node.Id) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_SET_VALUE: - value, err := protobuf.MarshalAny(proposal.KeyValue.Value) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - err = f.SetValue(proposal.KeyValue.Key, value, false) - if err != nil { - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - case management.Proposal_DELETE_VALUE: - err = f.DeleteValue(proposal.KeyValue.Key) - if err != nil { - 
f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } - return &fsmResponse{error: nil} - default: - err = errors.New("unsupported command") - f.logger.Error(err.Error()) - return &fsmResponse{error: err} - } -} - -func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { - f.logger.Info("snapshot") - - return &RaftFSMSnapshot{ - data: f.data, - logger: f.logger, - }, nil -} - -func (f *RaftFSM) Restore(rc io.ReadCloser) error { - f.logger.Info("restore") - - defer func() { - err := rc.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - data, err := ioutil.ReadAll(rc) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - err = json.Unmarshal(data, &f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -type RaftFSMSnapshot struct { - data maputils.Map - logger *zap.Logger -} - -func (f *RaftFSMSnapshot) Persist(sink raft.SnapshotSink) error { - f.logger.Info("persist") - - defer func() { - err := sink.Close() - if err != nil { - f.logger.Error(err.Error()) - } - }() - - buff, err := json.Marshal(f.data) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - _, err = sink.Write(buff) - if err != nil { - f.logger.Error(err.Error()) - return err - } - - return nil -} - -func (f *RaftFSMSnapshot) Release() { - f.logger.Info("release") -} diff --git a/manager/raft_fsm_test.go b/manager/raft_fsm_test.go deleted file mode 100644 index 86f70ba..0000000 --- a/manager/raft_fsm_test.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "io/ioutil" - "os" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf/management" -) - -func TestRaftFSM_GetNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: 
"5110", - HttpAddress: "8110", - }, - } - - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - -} - -func TestRaftFSM_SetNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - - val2, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := 
&management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_SHUTDOWN, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestRaftFSM_DeleteNode(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - _ = fsm.SetNode( - &management.Node{ - Id: "node1", - BindAddress: "2100", - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: "5100", - HttpAddress: "8100", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - }, - ) - _ = fsm.SetNode( - &management.Node{ - Id: "node3", - BindAddress: "2120", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5120", - HttpAddress: "8120", - }, - }, - ) - - val1, err := fsm.GetNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := &management.Node{ - Id: "node2", - BindAddress: "2110", - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: "5110", - HttpAddress: "8110", - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = fsm.DeleteNode("node2") - if err != nil { - t.Fatalf("%v", err) - } - - val2, err := fsm.GetNode("node2") - if err == nil { - t.Fatalf("expected error: %v", 
err) - } - - act1 = val2 - if reflect.DeepEqual(nil, act1) { - t.Fatalf("expected content to see nil, saw %v", act1) - } -} - -func TestRaftFSM_Get(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - expectedValue := 1 - actualValue := value - if !cmp.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestRaftFSM_Set(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - val1, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := map[string]interface{}{"a": 1} - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - // merge {"a": "A"} - _ = fsm.SetValue("/", map[string]interface{}{"a": "A"}, true) - val2, err := 
fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := map[string]interface{}{"a": "A"} - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - // set {"a": {"b": "AB"}} - err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"b": "AB"}}, false) - if err != nil { - t.Fatalf("%v", err) - } - - val3, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp3 := map[string]interface{}{"a": map[string]interface{}{"b": "AB"}} - act3 := val3 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } - - // merge {"a": {"c": "AC"}} - err = fsm.SetValue("/", map[string]interface{}{"a": map[string]interface{}{"c": "AC"}}, true) - if err != nil { - t.Fatalf("%v", err) - } - val4, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp4 := map[string]interface{}{"a": map[string]interface{}{"b": "AB", "c": "AC"}} - act4 := val4 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - val5, err := fsm.GetValue("/") - if err != nil { - t.Fatalf("%v", err) - } - exp5 := map[string]interface{}{"a": 1} - act5 := val5 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - - // TODO: merge {"a": {"c": "AC"}} - //fsm.applySet("/", map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //}, true) - //val6, err := fsm.Get("/") - //if err != nil { - // t.Fatalf("%v", err) - //} - //exp6 := map[string]interface{}{ - // "a": map[string]interface{}{ - // "c": "AC", - // }, - //} - //act6 := val6 - //if !reflect.DeepEqual(exp6, act6) { - // t.Fatalf("expected content to see %v, saw %v", exp6, act6) - //} -} - -func TestRaftFSM_Delete(t *testing.T) { - tmp, err 
:= ioutil.TempDir("", "") - if err != nil { - t.Fatalf("%v", err) - } - defer func() { - err := os.RemoveAll(tmp) - if err != nil { - t.Fatalf("%v", err) - } - }() - - logger := logutils.NewLogger("DEBUG", "", 100, 5, 3, false) - - fsm, err := NewRaftFSM(tmp, logger) - if err != nil { - t.Fatalf("%v", err) - } - err = fsm.Start() - defer func() { - err := fsm.Stop() - if err != nil { - t.Fatalf("%v", err) - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set {"a": 1} - err = fsm.SetValue("/", map[string]interface{}{"a": 1}, false) - if err != nil { - t.Fatalf("%v", err) - } - - value, err := fsm.GetValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - expectedValue := 1 - actualValue := value - if !cmp.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } - - err = fsm.DeleteValue("/a") - if err != nil { - t.Fatalf("%v", err) - } - - value, err = fsm.GetValue("/a") - if err == nil { - t.Fatalf("expected nil: %v", err) - } - - actualValue = value - if nil != actualValue { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} diff --git a/manager/raft_server.go b/manager/raft_server.go deleted file mode 100644 index 7918433..0000000 --- a/manager/raft_server.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package manager - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/blevesearch/bleve/mapping" - "github.com/gogo/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - _ "github.com/mosuka/blast/builtins" - blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" - //raftmdb "github.com/hashicorp/raft-mdb" -) - -type RaftServer struct { - node *management.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - bootstrap bool - logger *zap.Logger - - transport *raft.NetworkTransport - raft *raft.Raft - fsm *RaftFSM - mu sync.RWMutex -} - -func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { - return &RaftServer{ - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - bootstrap: bootstrap, - logger: logger, - }, nil -} - -func (s *RaftServer) Start() error { - var err error - - fsmPath := filepath.Join(s.dataDir, "store") - s.logger.Info("create finite state machine", zap.String("path", fsmPath)) - s.fsm, err = NewRaftFSM(fsmPath, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("start finite state machine") - err = s.fsm.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft config", zap.String("id", s.node.Id)) - raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.node.Id) - raftConfig.SnapshotThreshold = 1024 - raftConfig.LogOutput = ioutil.Discard - //if 
s.bootstrap { - // raftConfig.StartAsLeader = true - //} - - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) - addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) - s.transport, err = raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - snapshotPath := s.dataDir - s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) - snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - s.logger.Info("create Raft machine") - var logStore raft.LogStore - var stableStore raft.StableStore - switch s.raftStorageType { - case "boltdb": - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - //case "badger": - // logStorePath := filepath.Join(s.dataDir, "raft", "log") - // s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = 
os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // logStore, err = raftbadgerdb.NewBadgerStore(logStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStorePath := filepath.Join(s.dataDir, "raft", "stable") - // s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - // err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - // stableStore, err = raftbadgerdb.NewBadgerStore(stableStorePath) - // if err != nil { - // s.logger.Fatal(err.Error()) - // return err - // } - default: - logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(logStorePath), 0755) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - logStore, err = raftboltdb.NewBoltStore(logStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) - err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) - stableStore, err = raftboltdb.NewBoltStore(stableStorePath) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - } - - s.logger.Info("create Raft machine") - s.raft, err = raft.NewRaft(raftConfig, s.fsm, logStore, stableStore, snapshotStore, s.transport) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - if s.bootstrap { - s.logger.Info("configure Raft machine as bootstrap") - configuration := raft.Configuration{ - Servers: []raft.Server{ - { - ID: raftConfig.LocalID, - 
Address: s.transport.LocalAddr(), - }, - }, - } - s.raft.BootstrapCluster(configuration) - - s.logger.Info("wait for become a leader") - err = s.WaitForDetectLeader(60 * time.Second) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set node config - s.logger.Info("register its own node config", zap.Any("node", s.node)) - err = s.setNode(s.node) - if err != nil { - s.logger.Fatal(err.Error()) - return err - } - - // set index config - s.logger.Info("register index config") - b, err := json.Marshal(s.indexMapping) - if err != nil { - s.logger.Error(err.Error()) - return err - } - var indexMappingMap map[string]interface{} - err = json.Unmarshal(b, &indexMappingMap) - if err != nil { - s.logger.Error(err.Error()) - return err - } - indexConfig := map[string]interface{}{ - "index_mapping": indexMappingMap, - "index_type": s.indexType, - "index_storage_type": s.indexStorageType, - } - err = s.SetValue("index_config", indexConfig) - if err != nil { - s.logger.Error(err.Error(), zap.String("key", "index_config")) - return err - } - } - - return nil -} - -func (s *RaftServer) Stop() error { - s.logger.Info("shutdown Raft machine") - f := s.raft.Shutdown() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - s.logger.Info("stop finite state machine") - err = s.fsm.Stop() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - timer := time.NewTimer(timeout) - defer timer.Stop() - - for { - select { - case <-ticker.C: - leaderAddr := s.raft.Leader() - if leaderAddr != "" { - s.logger.Debug("detect a leader", zap.String("address", string(leaderAddr))) - return leaderAddr, nil - } - case <-timer.C: - s.logger.Error("timeout exceeded") - return "", blasterrors.ErrTimeout - } - } -} - -func (s *RaftServer) LeaderID(timeout 
time.Duration) (raft.ServerID, error) { - leaderAddr, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - cf := s.raft.GetConfiguration() - err = cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return "", err - } - - for _, server := range cf.Configuration().Servers { - if server.Address == leaderAddr { - return server.ID, nil - } - } - - s.logger.Error(blasterrors.ErrNotFoundLeader.Error()) - return "", blasterrors.ErrNotFoundLeader -} - -func (s *RaftServer) NodeAddress() string { - return string(s.transport.LocalAddr()) -} - -func (s *RaftServer) NodeID() string { - return s.node.Id -} - -func (s *RaftServer) Stats() map[string]string { - return s.raft.Stats() -} - -func (s *RaftServer) State() raft.RaftState { - return s.raft.State() -} - -func (s *RaftServer) IsLeader() bool { - return s.State() == raft.Leader -} - -func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { - _, err := s.LeaderAddress(timeout) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) getNode(nodeId string) (*management.Node, error) { - nodeConfig, err := s.fsm.GetNode(nodeId) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", nodeId)) - return nil, err - } - - return nodeConfig, nil -} - -func (s *RaftServer) setNode(node *management.Node) error { - proposal := &management.Proposal{ - Event: management.Proposal_SET_NODE, - Node: node, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) deleteNode(nodeId string) error { - proposal := 
&management.Proposal{ - Event: management.Proposal_DELETE_NODE, - Node: &management.Node{ - Id: nodeId, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetNode(id string) (*management.Node, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - var node *management.Node - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(id) { - node, err = s.getNode(id) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", id)) - return nil, err - } - break - } - } - - return node, nil -} - -func (s *RaftServer) SetNode(node *management.Node) error { - if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(node.Id) { - s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) - return nil - } - } - - if node.BindAddress == "" { - err = errors.New("missing bind address") - s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) - return err - } - - // add node to Raft cluster - s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) - f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) - err = f.Error() - if err != nil { - 
s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) - return err - } - - // set node config - err = s.setNode(node) - if err != nil { - s.logger.Error(err.Error(), zap.Any("node", node)) - return err - } - - return nil -} - -func (s *RaftServer) DeleteNode(nodeId string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - // delete node from Raft cluster - for _, server := range cf.Configuration().Servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("remove the node from the raft cluster", zap.String("id", nodeId)) - f := s.raft.RemoveServer(server.ID, 0, 0) - err = f.Error() - if err != nil { - s.logger.Error(err.Error(), zap.String("id", string(server.ID))) - return err - } - } - } - - // delete node config - err = s.deleteNode(nodeId) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId)) - return err - } - - return nil -} - -func (s *RaftServer) GetCluster() (*management.Cluster, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - cluster := &management.Cluster{Nodes: make(map[string]*management.Node, 0)} - for _, server := range cf.Configuration().Servers { - node, err := s.GetNode(string(server.ID)) - if err != nil { - s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) - continue - } - - cluster.Nodes[string(server.ID)] = node - } - - return cluster, nil -} - -func (s *RaftServer) Snapshot() error { - f := s.raft.Snapshot() - err := f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) GetValue(key string) (interface{}, error) { - value, err := s.fsm.GetValue(key) - if err 
!= nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", key)) - default: - s.logger.Error(err.Error(), zap.String("key", key)) - } - return nil, err - } - - return value, nil -} - -func (s *RaftServer) SetValue(key string, value interface{}) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - valueAny := &any.Any{} - err := protobuf.UnmarshalAny(value, valueAny) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - proposal := &management.Proposal{ - Event: management.Proposal_SET_VALUE, - KeyValue: &management.KeyValue{ - Key: key, - Value: valueAny, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - s.logger.Error(err.Error()) - return err - } - - return nil -} - -func (s *RaftServer) DeleteValue(key string) error { - if !s.IsLeader() { - s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) - return raft.ErrNotLeader - } - - proposal := &management.Proposal{ - Event: management.Proposal_DELETE_VALUE, - KeyValue: &management.KeyValue{ - Key: key, - }, - } - proposalByte, err := proto.Marshal(proposal) - if err != nil { - s.logger.Error(err.Error()) - return err - } - - f := s.raft.Apply(proposalByte, 10*time.Second) - err = f.Error() - if err != nil { - s.logger.Error(err.Error()) - return err - } - err = f.Response().(*fsmResponse).error - if err != nil { - switch err { - case blasterrors.ErrNotFound: - s.logger.Debug(err.Error(), zap.String("key", key)) - default: - s.logger.Error(err.Error(), zap.String("key", key)) - } - return err - } - - return nil -} diff --git a/manager/server.go 
b/manager/server.go deleted file mode 100644 index 909b4fc..0000000 --- a/manager/server.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "github.com/blevesearch/bleve/mapping" - accesslog "github.com/mash/go-accesslog" - "github.com/mosuka/blast/protobuf/management" - "go.uber.org/zap" -) - -type Server struct { - peerGrpcAddr string - node *management.Node - dataDir string - raftStorageType string - indexMapping *mapping.IndexMappingImpl - indexType string - indexStorageType string - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger - - raftServer *RaftServer - grpcService *GRPCService - grpcServer *GRPCServer - grpcGateway *GRPCGateway - httpRouter *Router - httpServer *HTTPServer -} - -func NewServer(peerGrpcAddr string, node *management.Node, dataDir string, raftStorageType string, indexMapping *mapping.IndexMappingImpl, indexType string, indexStorageType string, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { - return &Server{ - peerGrpcAddr: peerGrpcAddr, - node: node, - dataDir: dataDir, - raftStorageType: raftStorageType, - indexMapping: indexMapping, - indexType: indexType, - indexStorageType: indexStorageType, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, - }, nil -} - -func (s *Server) Start() { - var err error - - // bootstrap node? 
- bootstrap := s.peerGrpcAddr == "" - s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) - - // create raft server - s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexMapping, s.indexType, s.indexStorageType, bootstrap, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC service - s.grpcService, err = NewGRPCService(s.raftServer, s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC server - s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create gRPC gateway - s.grpcGateway, err = NewGRPCGateway(s.node.Metadata.GrpcGatewayAddress, s.node.Metadata.GrpcAddress, s.logger) - if err != nil { - s.logger.Error(err.Error()) - return - } - - // create HTTP router - s.httpRouter, err = NewRouter(s.logger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // create HTTP server - s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start Raft server - s.logger.Info("start Raft server") - err = s.raftServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - // start gRPC service - s.logger.Info("start gRPC service") - go func() { - err := s.grpcService.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC server - s.logger.Info("start gRPC server") - go func() { - err := s.grpcServer.Start() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - }() - - // start gRPC gateway - s.logger.Info("start gRPC gateway") - go func() { - _ = s.grpcGateway.Start() - }() - - // start HTTP server - s.logger.Info("start HTTP server") - go func() { - _ = s.httpServer.Start() - }() - - // join to the existing cluster - if !bootstrap { - client, err := 
NewGRPCClient(s.peerGrpcAddr) - defer func() { - err := client.Close() - if err != nil { - s.logger.Error(err.Error()) - } - }() - if err != nil { - s.logger.Fatal(err.Error()) - return - } - - req := &management.ClusterJoinRequest{ - Node: s.node, - } - - _, err = client.ClusterJoin(req) - if err != nil { - s.logger.Fatal(err.Error()) - return - } - } -} - -func (s *Server) Stop() { - s.logger.Info("stop HTTP server") - err := s.httpServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop HTTP router") - err = s.httpRouter.Close() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC gateway") - err = s.grpcGateway.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC server") - err = s.grpcServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop gRPC service") - err = s.grpcService.Stop() - if err != nil { - s.logger.Error(err.Error()) - } - - s.logger.Info("stop Raft server") - err = s.raftServer.Stop() - if err != nil { - s.logger.Error(err.Error()) - } -} - -func (s *Server) BindAddress() string { - return s.raftServer.NodeAddress() -} - -func (s *Server) GrpcAddress() string { - address, err := s.grpcServer.GetAddress() - if err != nil { - return "" - } - - return address -} - -func (s *Server) HttpAddress() string { - address, err := s.grpcGateway.GetAddress() - if err != nil { - return "" - } - - return address -} diff --git a/manager/server_test.go b/manager/server_test.go deleted file mode 100644 index 0b863c3..0000000 --- a/manager/server_test.go +++ /dev/null @@ -1,2774 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package manager - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/go-cmp/cmp" - "github.com/mosuka/blast/indexutils" - "github.com/mosuka/blast/logutils" - "github.com/mosuka/blast/protobuf" - "github.com/mosuka/blast/protobuf/management" - "github.com/mosuka/blast/strutils" - "github.com/mosuka/blast/testutils" -) - -func TestServer_Start(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } 
- indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestServer_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC 
client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // healthiness - reqHealthiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - resHealthiness, err := client.NodeHealthCheck(reqHealthiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness := management.NodeHealthCheckResponse_HEALTHY - actHealthiness := resHealthiness.State - if expHealthiness != actHealthiness { - t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) - } - - // liveness - reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - resLiveness, err := client.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness := management.NodeHealthCheckResponse_ALIVE - actLiveness := resLiveness.State - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } - - // readiness - reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - resReadiness, err := client.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness := management.NodeHealthCheckResponse_READY - actReadiness := resReadiness.State - if expReadiness != actReadiness { - t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) - } -} - -func TestServer_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewawyAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := 
fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewawyAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get node - res, err := client.NodeInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expNodeInfo := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewawyAddress, - HttpAddress: httpAddress, - }, - } - actNodeInfo := res.Node - if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { - t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) - } -} - -func TestServer_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, 
false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster - res, err := client.ClusterInfo(&empty.Empty{}) - if err != nil { - t.Fatalf("%v", err) - } - expCluster := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId: { - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - 
GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - }, - }, - } - actCluster := res.Cluster - if !reflect.DeepEqual(expCluster, actCluster) { - t.Fatalf("expected content to see %v, saw %v", expCluster, actCluster) - } -} - -func TestServer_Set(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != 
nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{ - Key: "test/key1", - } - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(getRes.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, 
indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{Key: "test/key1"} - getRes, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(getRes.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } -} - -func TestServer_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress := "" - grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir) - }() - raftStorageType := "boltdb" - - node := &management.Node{ - Id: nodeId, - BindAddress: bindAddress, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: 
grpcAddress, - GrpcGatewayAddress: grpcGatewayAddress, - HttpAddress: httpAddress, - }, - } - - indexMapping, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType := "upside_down" - indexStorageType := "boltdb" - - // create server - server, err := NewServer(peerGrpcAddress, node, dataDir, raftStorageType, indexMapping, indexType, indexStorageType, logger, grpcLogger, httpAccessLogger) - defer func() { - if server != nil { - server.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(node.Metadata.GrpcAddress) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // set value - valueAny := &any.Any{} - if err != nil { - t.Fatalf("%v", err) - } - err = protobuf.UnmarshalAny("val1", valueAny) - setReq := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client.Set(setReq) - if err != nil { - t.Fatalf("%v", err) - } - - // get value - getReq := &management.GetRequest{ - Key: "test/key1", - } - res, err := client.Get(getReq) - if err != nil { - t.Fatalf("%v", err) - } - - expVal1 := "val1" - - val1, err := protobuf.MarshalAny(res.Value) - actVal1 := *val1.(*string) - - if !cmp.Equal(expVal1, actVal1) { - t.Fatalf("expected content to see %v, saw %v", expVal1, actVal1) - } - - // delete value - deleteReq := &management.DeleteRequest{ - Key: "test/key1", - } - _, err = client.Delete(deleteReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data - deleteNonExistingReq := &management.DeleteRequest{ - Key: "test/non-existing", - } - _, err = client.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } -} - -func TestCluster_Start(t *testing.T) { - 
curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: 
bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server 
- server3.Start() - - // sleep - time.Sleep(5 * time.Second) -} - -func TestCluster_HealthCheck(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = 
os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer 
func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - reqHealtiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_HEALTHINESS} - reqLiveness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_LIVENESS} - reqReadiness := &management.NodeHealthCheckRequest{Probe: management.NodeHealthCheckRequest_READINESS} - - // healthiness - resHealthiness1, err := client1.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness1 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness1 := resHealthiness1.State - if expHealthiness1 != actHealthiness1 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) - } - - // liveness - resLiveness1, err := client1.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness1 := management.NodeHealthCheckResponse_ALIVE - actLiveness1 := resLiveness1.State - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) - } - - // readiness - resReadiness1, err := client1.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness1 := management.NodeHealthCheckResponse_READY - actReadiness1 := resReadiness1.State - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", 
expReadiness1, actReadiness1) - } - - // healthiness - resHealthiness2, err := client2.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness2 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness2 := resHealthiness2.State - if expHealthiness2 != actHealthiness2 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) - } - - // liveness - resLiveness2, err := client2.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness2 := management.NodeHealthCheckResponse_ALIVE - actLiveness2 := resLiveness2.State - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) - } - - // readiness - resReadiness2, err := client2.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness2 := management.NodeHealthCheckResponse_READY - actReadiness2 := resReadiness2.State - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) - } - - // healthiness - resHealthiness3, err := client3.NodeHealthCheck(reqHealtiness) - if err != nil { - t.Fatalf("%v", err) - } - expHealthiness3 := management.NodeHealthCheckResponse_HEALTHY - actHealthiness3 := resHealthiness3.State - if expHealthiness3 != actHealthiness3 { - t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) - } - - // liveness - resLiveness3, err := client3.NodeHealthCheck(reqLiveness) - if err != nil { - t.Fatalf("%v", err) - } - expLiveness3 := management.NodeHealthCheckResponse_ALIVE - actLiveness3 := resLiveness3.State - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) - } - - // readiness - resReadiness3, err := client3.NodeHealthCheck(reqReadiness) - if err != nil { - t.Fatalf("%v", err) - } - expReadiness3 := management.NodeHealthCheckResponse_READY - actReadiness3 := resReadiness3.State - if 
expReadiness3 != actReadiness3 { - t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) - } -} - -func TestCluster_GetNode(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := 
testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, 
logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get all node info from all nodes - req := &empty.Empty{} - resNodeInfo11, err := client1.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode11 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - actNode11 := resNodeInfo11.Node - if !reflect.DeepEqual(expNode11, actNode11) { - t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) - } - - resNodeInfo21, err := client2.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode21 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - actNode21 := resNodeInfo21.Node - if !reflect.DeepEqual(expNode21, actNode21) { - t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) - } - - resNodeInfo31, err := client3.NodeInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expNode31 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: 
&management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - actNode31 := resNodeInfo31.Node - if !reflect.DeepEqual(expNode31, actNode31) { - t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) - } -} - -func TestCluster_GetCluster(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - 
httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" 
- indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - // get cluster info from manager1 - req := &empty.Empty{} - resClusterInfo1, err := client1.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster1 := resClusterInfo1.Cluster - if !reflect.DeepEqual(expCluster1, actCluster1) { - t.Fatalf("expected content to see %v, saw %v", expCluster1, 
actCluster1) - } - - resClusterInfo2, err := client2.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster2 := resClusterInfo2.Cluster - if !reflect.DeepEqual(expCluster2, actCluster2) { - t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) - } - - resClusterInfo3, err := client3.ClusterInfo(req) - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := &management.Cluster{ - Nodes: map[string]*management.Node{ - nodeId1: { - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_LEADER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - }, - nodeId2: { - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - }, - nodeId3: { - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_FOLLOWER, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - }, - }, - } - actCluster3 := 
resClusterInfo3.Cluster - if !reflect.DeepEqual(expCluster3, actCluster3) { - t.Fatalf("expected content to see %v, saw %v", expCluster3, actCluster3) - } -} - -func TestCluster_Set(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - 
dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, 
indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - 
t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if !cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from 
all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } -} - -func TestCluster_Get(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - 
HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", 
testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for 
data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if !cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - 
val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - 
t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } -} - -func TestCluster_Delete(t *testing.T) { - curDir, _ := os.Getwd() - - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - peerGrpcAddress1 := "" - grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId1 := "node-1" - bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir1 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir1) - }() - raftStorageType1 := "boltdb" - - node1 := &management.Node{ - Id: nodeId1, - BindAddress: bindAddress1, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress1, - GrpcGatewayAddress: grpcGatewayAddress1, - HttpAddress: httpAddress1, - }, - } - - indexMapping1, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType1 := "upside_down" - indexStorageType1 := "boltdb" - - // create server - server1, err := NewServer(peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexMapping1, indexType1, indexStorageType1, logger, grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server1.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress2 := grpcAddress1 - grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId2 := "node-2" - bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir2 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir2) - }() - 
raftStorageType2 := "boltdb" - - node2 := &management.Node{ - Id: nodeId2, - BindAddress: bindAddress2, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress2, - GrpcGatewayAddress: grpcGatewayAddress2, - HttpAddress: httpAddress2, - }, - } - - indexMapping2, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType2 := "upside_down" - indexStorageType2 := "boltdb" - - // create server - server2, err := NewServer(peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexMapping2, indexType2, indexStorageType2, logger, grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server2.Start() - - // sleep - time.Sleep(5 * time.Second) - - peerGrpcAddress3 := grpcAddress1 - grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - grpcGatewayAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - nodeId3 := "node-3" - bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) - dataDir3 := testutils.TmpDir() - defer func() { - _ = os.RemoveAll(dataDir3) - }() - raftStorageType3 := "boltdb" - - node3 := &management.Node{ - Id: nodeId3, - BindAddress: bindAddress3, - State: management.Node_UNKNOWN, - Metadata: &management.Metadata{ - GrpcAddress: grpcAddress3, - GrpcGatewayAddress: grpcGatewayAddress3, - HttpAddress: httpAddress3, - }, - } - - indexMapping3, err := indexutils.NewIndexMappingFromFile(filepath.Join(curDir, "../example/wiki_index_mapping.json")) - if err != nil { - t.Fatalf("%v", err) - } - indexType3 := "upside_down" - indexStorageType3 := "boltdb" - - // create server - server3, err := NewServer(peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexMapping3, indexType3, indexStorageType3, logger, grpcLogger, httpAccessLogger) - defer func() { - if server3 
!= nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) - } - - // start server - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - - // gRPC client for all servers - client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) - defer func() { - _ = client2.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) - defer func() { - _ = client3.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - - valueAny := &any.Any{} - err = protobuf.UnmarshalAny("val1", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq1 := &management.SetRequest{ - Key: "test/key1", - Value: valueAny, - } - _, err = client1.Set(setReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq1 := &management.GetRequest{ - Key: "test/key1", - } - getRes11, err := client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val11, err := protobuf.MarshalAny(getRes11.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal11 := "val1" - actVal11 := *val11.(*string) - if !cmp.Equal(expVal11, actVal11) { - t.Fatalf("expected content to see %v, saw %v", expVal11, actVal11) - } - getRes21, err := client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val21, err := protobuf.MarshalAny(getRes21.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal21 := "val1" - actVal21 := *val21.(*string) - if !cmp.Equal(expVal21, actVal21) { - t.Fatalf("expected content to see %v, saw %v", expVal21, actVal21) - } - getRes31, err := client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - val31, err := protobuf.MarshalAny(getRes31.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal31 := "val1" - actVal31 := *val31.(*string) - if 
!cmp.Equal(expVal31, actVal31) { - t.Fatalf("expected content to see %v, saw %v", expVal31, actVal31) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val2", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq2 := &management.SetRequest{ - Key: "test/key2", - Value: valueAny, - } - _, err = client2.Set(setReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq2 := &management.GetRequest{ - Key: "test/key2", - } - getRes12, err := client1.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val12, err := protobuf.MarshalAny(getRes12.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal12 := "val2" - actVal12 := *val12.(*string) - if !cmp.Equal(expVal12, actVal12) { - t.Fatalf("expected content to see %v, saw %v", expVal12, actVal12) - } - getRes22, err := client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val22, err := protobuf.MarshalAny(getRes22.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal22 := "val2" - actVal22 := *val22.(*string) - if !cmp.Equal(expVal22, actVal22) { - t.Fatalf("expected content to see %v, saw %v", expVal22, actVal22) - } - getRes32, err := client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - val32, err := protobuf.MarshalAny(getRes32.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal32 := "val2" - actVal32 := *val32.(*string) - if !cmp.Equal(expVal32, actVal32) { - t.Fatalf("expected content to see %v, saw %v", expVal32, actVal32) - } - - valueAny = &any.Any{} - err = protobuf.UnmarshalAny("val3", valueAny) - if err != nil { - t.Fatalf("%v", err) - } - setReq3 := &management.SetRequest{ - Key: "test/key3", - Value: valueAny, - } - _, err = client3.Set(setReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getReq3 := &management.GetRequest{ - Key: "test/key3", - } - 
getRes13, err := client1.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val13, err := protobuf.MarshalAny(getRes13.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal13 := "val3" - actVal13 := *val13.(*string) - if !cmp.Equal(expVal13, actVal13) { - t.Fatalf("expected content to see %v, saw %v", expVal13, actVal13) - } - getRes23, err := client2.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val23, err := protobuf.MarshalAny(getRes23.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal23 := "val3" - actVal23 := *val23.(*string) - if !cmp.Equal(expVal23, actVal23) { - t.Fatalf("expected content to see %v, saw %v", expVal23, actVal23) - } - getRes33, err := client3.Get(getReq3) - if err != nil { - t.Fatalf("%v", err) - } - val33, err := protobuf.MarshalAny(getRes33.Value) - if err != nil { - t.Fatalf("%v", err) - } - expVal33 := "val3" - actVal33 := *val33.(*string) - if !cmp.Equal(expVal33, actVal33) { - t.Fatalf("expected content to see %v, saw %v", expVal33, actVal33) - } - - // delete - deleteReq1 := &management.DeleteRequest{ - Key: "test/key1", - } - _, err = client1.Delete(deleteReq1) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getRes11, err = client1.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes11.Value != nil { - t.Fatalf("%v", err) - } - getRes21, err = client2.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes21.Value != nil { - t.Fatalf("%v", err) - } - getRes31, err = client3.Get(getReq1) - if err != nil { - t.Fatalf("%v", err) - } - if getRes31.Value != nil { - t.Fatalf("%v", err) - } - - deleteReq2 := &management.DeleteRequest{ - Key: "test/key2", - } - _, err = client2.Delete(deleteReq2) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // get value from all nodes - getRes12, err = client1.Get(getReq2) - if err != nil { - 
t.Fatalf("%v", err) - } - if getRes12.Value != nil { - t.Fatalf("%v", err) - } - getRes22, err = client2.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - if getRes22.Value != nil { - t.Fatalf("%v", err) - } - getRes32, err = client3.Get(getReq2) - if err != nil { - t.Fatalf("%v", err) - } - if getRes32.Value != nil { - t.Fatalf("%v", err) - } - - deleteReq3 := &management.DeleteRequest{ - Key: "test/key2", - } - _, err = client3.Delete(deleteReq3) - if err != nil { - t.Fatalf("%v", err) - } - time.Sleep(2 * time.Second) // wait for data to propagate - - // delete non-existing data from manager1 - deleteNonExistingReq := &management.DeleteRequest{ - Key: "test/non-existing", - } - _, err = client1.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data from manager2 - _, err = client2.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } - - // delete non-existing data from manager3 - _, err = client3.Delete(deleteNonExistingReq) - if err != nil { - t.Fatalf("%v", err) - } -} diff --git a/indexutils/indexutils.go b/mapping/mapping.go similarity index 63% rename from indexutils/indexutils.go rename to mapping/mapping.go index 5c2dcfa..7bf0d24 100644 --- a/indexutils/indexutils.go +++ b/mapping/mapping.go @@ -1,18 +1,4 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package indexutils +package mapping import ( "encoding/json" @@ -22,16 +8,18 @@ import ( "github.com/blevesearch/bleve/mapping" ) +func NewIndexMapping() *mapping.IndexMappingImpl { + return mapping.NewIndexMapping() +} + func NewIndexMappingFromBytes(indexMappingBytes []byte) (*mapping.IndexMappingImpl, error) { indexMapping := mapping.NewIndexMapping() - err := indexMapping.UnmarshalJSON(indexMappingBytes) - if err != nil { + if err := indexMapping.UnmarshalJSON(indexMappingBytes); err != nil { return nil, err } - err = indexMapping.Validate() - if err != nil { + if err := indexMapping.Validate(); err != nil { return nil, err } diff --git a/maputils/error.go b/maputils/error.go deleted file mode 100644 index 455c9fc..0000000 --- a/maputils/error.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import "errors" - -var ( - ErrNotFound = errors.New("not found") -) diff --git a/maputils/maputils.go b/maputils/maputils.go deleted file mode 100644 index a5922fd..0000000 --- a/maputils/maputils.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import ( - "encoding/json" - "errors" - "strings" - - "github.com/imdario/mergo" - "github.com/stretchr/objx" - yaml "gopkg.in/yaml.v2" -) - -func splitKey(path string) []string { - keys := make([]string, 0) - for _, k := range strings.Split(path, "/") { - if k != "" { - keys = append(keys, k) - } - } - - return keys -} - -func makeSelector(key string) string { - return strings.Join(splitKey(key), objx.PathSeparator) -} - -func normalize(value interface{}) interface{} { - switch value.(type) { - case map[string]interface{}: - ret := Map{} - for k, v := range value.(map[string]interface{}) { - ret[k] = normalize(v) - } - return ret - case map[interface{}]interface{}: // when unmarshaled by yaml - ret := Map{} - for k, v := range value.(map[interface{}]interface{}) { - ret[k.(string)] = normalize(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, normalize(v)) - } - return ret - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func makeMap(path string, value interface{}) interface{} { - var ret interface{} - - keys := splitKey(path) - - if len(keys) >= 1 { - ret = Map{keys[0]: makeMap(strings.Join(keys[1:], "/"), value)} - } else if len(keys) == 0 { - ret = normalize(value) - } - - return ret -} - -type Map map[string]interface{} - -func New() Map { - return Map{} -} - -func FromMap(src 
map[string]interface{}) Map { - return normalize(src).(Map) -} - -func FromJSON(src []byte) (Map, error) { - t := map[string]interface{}{} - err := json.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func FromYAML(src []byte) (Map, error) { - t := map[string]interface{}{} - err := yaml.Unmarshal(src, &t) - if err != nil { - return nil, err - } - - return FromMap(t), nil -} - -func (m Map) Has(key string) (bool, error) { - _, err := m.Get(key) - if err != nil { - return false, err - } - - return true, nil -} - -func (m Map) Set(key string, value interface{}) error { - _ = m.Delete(key) - - err := m.Merge(key, value) - if err != nil { - return err - } - - return nil -} - -func (m Map) Merge(key string, value interface{}) error { - mm := makeMap(key, value).(Map) - - err := mergo.Merge(&m, mm, mergo.WithOverride) - if err != nil { - return err - } - - return nil -} - -func (m Map) Get(key string) (interface{}, error) { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - return tmpMap.(Map).ToMap(), nil - } - - iter := newIterator(splitKey(key)) - var value interface{} - for { - k, err := iter.value() - if err != nil { - return nil, err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return nil, ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } else { - value = tmpMap.(Map)[k] - break - } - } - - switch value.(type) { - case Map: - return value.(Map).ToMap(), nil - default: - return value, nil - } -} - -func (m Map) Delete(key string) error { - var tmpMap interface{} - - tmpMap = m - - keys := splitKey(key) - - if len(keys) <= 0 { - // clear map - err := m.Clear() - if err != nil { - return err - } - return nil - } - - iter := newIterator(splitKey(key)) - for { - k, err := iter.value() - if err != nil { - return err - } - - if _, ok := tmpMap.(Map)[k]; !ok { - return ErrNotFound - } - - if iter.hasNext() { - tmpMap = tmpMap.(Map)[k] - iter.next() - } 
else { - delete(tmpMap.(Map), k) - break - } - } - - return nil -} - -func (m Map) Clear() error { - for k := range m { - delete(m, k) - } - - return nil -} - -func (m Map) toMap(value interface{}) interface{} { - switch value.(type) { - case Map: - ret := map[string]interface{}{} - for k, v := range value.(Map) { - ret[k] = m.toMap(v) - } - return ret - case []interface{}: - ret := make([]interface{}, 0) - for _, v := range value.([]interface{}) { - ret = append(ret, m.toMap(v)) - } - return ret - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return value - default: - return value - } -} - -func (m Map) ToMap() map[string]interface{} { - return m.toMap(m).(map[string]interface{}) -} - -func (m Map) ToJSON() ([]byte, error) { - mm := m.ToMap() - b, err := json.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -func (m Map) ToYAML() ([]byte, error) { - mm := m.ToMap() - b, err := yaml.Marshal(&mm) - if err != nil { - return nil, err - } - - return b, nil -} - -type iterator struct { - keys []string - pos int -} - -func newIterator(keys []string) *iterator { - return &iterator{ - keys: keys, - pos: 0, - } -} - -func (i *iterator) hasNext() bool { - return i.pos < len(i.keys)-1 -} - -func (i *iterator) next() bool { - i.pos++ - return i.pos < len(i.keys)-1 -} - -func (i *iterator) value() (string, error) { - if i.pos > len(i.keys)-1 { - return "", errors.New("value is not valid after iterator finished") - } - return i.keys[i.pos], nil -} diff --git a/maputils/maputils_test.go b/maputils/maputils_test.go deleted file mode 100644 index d71e400..0000000 --- a/maputils/maputils_test.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maputils - -import ( - "bytes" - "reflect" - "testing" -) - -func Test_splitKey(t *testing.T) { - key1 := "/a/b/c/d" - keys1 := splitKey(key1) - exp1 := []string{"a", "b", "c", "d"} - act1 := keys1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - keys2 := splitKey(key2) - exp2 := make([]string, 0) - act2 := keys2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - keys3 := splitKey(key3) - exp3 := make([]string, 0) - act3 := keys3 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_makeSelector(t *testing.T) { - key1 := "/a/b/c/d" - selector1 := makeSelector(key1) - exp1 := "a.b.c.d" - act1 := selector1 - if exp1 != act1 { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - key2 := "/" - selector2 := makeSelector(key2) - exp2 := "" - act2 := selector2 - if exp2 != act2 { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - key3 := "" - selector3 := makeSelector(key3) - exp3 := "" - act3 := selector3 - if exp3 != act3 { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } -} - -func Test_normalize(t *testing.T) { - data1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := normalize(data1) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, 
- "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_makeMap(t *testing.T) { - val1 := makeMap("/a/b/c", "C").(Map) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2 := makeMap("a/b", map[string]interface{}{"c": "C"}).(Map) - exp2 := Map{ - "a": Map{ - "b": Map{ - "c": "C", - }, - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func TestMap_FromMap(t *testing.T) { - map1 := FromMap(map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - }) - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func TestMap_ToMap(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1 := map1.ToMap() - exp1 := map[string]interface{}{ - "a": map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromYAML(t *testing.T) { - map1, err := FromYAML([]byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`)) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) 
{ - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToYAML(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.ToYAML() - if err != nil { - t.Fatalf("%v", err) - } - exp1 := []byte(`a: - b: - c: abc - d: abd - e: - - ae1 - - ae2 -`) - act1 := val1 - if !bytes.Equal(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_FromJSON(t *testing.T) { - map1, err := FromJSON([]byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`)) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_ToJSON(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - val1, err := map1.ToJSON() - if err != nil { - t.Fatalf("%v", err) - } - exp1 := []byte(`{"a":{"b":{"c":"abc","d":"abd"},"e":["ae1","ae2"]}}`) - act1 := val1 - if !bytes.Equal(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } -} - -func Test_Has(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Has("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := true - act1 := val1 - if exp1 != act1 { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a/b/f") - if err != ErrNotFound { - t.Fatalf("%v", err) - } - exp2 := false - act2 := val2 - if exp2 == act2 { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Set(t *testing.T) { - map1 := Map{} - - err := map1.Set("/", Map{"a": "A"}) - if err != nil { - 
t.Fatalf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Set("/", Map{"A": "a"}) - if err != nil { - t.Fatalf("%v", err) - } - exp2 := Map{ - "A": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Set("/", Map{"A": 1}) - if err != nil { - t.Fatalf("%v", err) - } - exp3 := Map{ - "A": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Set("/A", "AAA") - if err != nil { - t.Fatalf("%v", err) - } - exp4 := Map{ - "A": "AAA", - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - err = map1.Set("/B", "BBB") - if err != nil { - t.Fatalf("%v", err) - } - exp5 := Map{ - "A": "AAA", - "B": "BBB", - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - - err = map1.Set("/C", map[string]interface{}{"D": "CCC-DDD"}) - if err != nil { - t.Fatalf("%v", err) - } - exp6 := Map{ - "A": "AAA", - "B": "BBB", - "C": Map{ - "D": "CCC-DDD", - }, - } - act6 := map1 - if !reflect.DeepEqual(exp6, act6) { - t.Fatalf("expected content to see %v, saw %v", exp6, act6) - } -} - -func Test_Merge(t *testing.T) { - map1 := Map{} - - err := map1.Merge("/", Map{"a": "A"}) - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": "A", - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - err = map1.Merge("/a", "a") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := Map{ - "a": "a", - } - act2 := map1 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } - - err = map1.Merge("/", Map{"a": 1}) - if err != nil { - t.Fatalf("%v", 
err) - } - exp3 := Map{ - "a": 1, - } - act3 := map1 - if !reflect.DeepEqual(exp3, act3) { - t.Fatalf("expected content to see %v, saw %v", exp3, act3) - } - - err = map1.Merge("/", Map{"b": 2}) - if err != nil { - t.Fatalf("%v", err) - } - exp4 := Map{ - "a": 1, - "b": 2, - } - act4 := map1 - if !reflect.DeepEqual(exp4, act4) { - t.Fatalf("expected content to see %v, saw %v", exp4, act4) - } - - err = map1.Merge("/c", 3) - if err != nil { - t.Fatalf("%v", err) - } - exp5 := Map{ - "a": 1, - "b": 2, - "c": 3, - } - act5 := map1 - if !reflect.DeepEqual(exp5, act5) { - t.Fatalf("expected content to see %v, saw %v", exp5, act5) - } - -} - -func Test_Get(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - val1, err := map1.Get("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := "abc" - act1 := val1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - - val2, err := map1.Get("a") - if err != nil { - t.Fatalf("%v", err) - } - exp2 := map[string]interface{}{ - "b": map[string]interface{}{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - } - act2 := val2 - if !reflect.DeepEqual(exp2, act2) { - t.Fatalf("expected content to see %v, saw %v", exp2, act2) - } -} - -func Test_Delete(t *testing.T) { - map1 := Map{ - "a": Map{ - "b": Map{ - "c": "abc", - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - - err := map1.Delete("a/b/c") - if err != nil { - t.Fatalf("%v", err) - } - exp1 := Map{ - "a": Map{ - "b": Map{ - "d": "abd", - }, - "e": []interface{}{ - "ae1", - "ae2", - }, - }, - } - act1 := map1 - if !reflect.DeepEqual(exp1, act1) { - t.Fatalf("expected content to see %v, saw %v", exp1, act1) - } - -} - -//func Test_Get(t *testing.T) { -// data1 := objx.Map{ -// "a": objx.Map{ -// "b": objx.Map{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", 
-// "ae2", -// }, -// }, -// } -// key1 := "/" -// val1, err := Get(data1, key1) -// if err != nil { -// t.Fatalf("%v", err) -// } -// exp1 := map[string]interface{}{ -// "a": map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// }, -// } -// act1 := val1 -// if !reflect.DeepEqual(exp1, act1) { -// t.Fatalf("expected content to see %v, saw %v", exp1, act1) -// } -// -// key2 := "/a" -// val2, err := Get(data1, key2) -// if err != nil { -// t.Fatalf("%v", err) -// } -// exp2 := map[string]interface{}{ -// "b": map[string]interface{}{ -// "c": "abc", -// "d": "abd", -// }, -// "e": []interface{}{ -// "ae1", -// "ae2", -// }, -// } -// act2 := val2 -// if !reflect.DeepEqual(exp2, act2) { -// t.Fatalf("expected content to see %v, saw %v", exp2, act2) -// } -//} - -//func Test_Set(t *testing.T) { -// data := map[string]interface{}{} -// -// data, err := Set(data, "/", map[string]interface{}{"a": 1}, true) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp1 := 1 -// act1 := val1 -// if exp1 != act1 { -// t.Fatalf("expected content to see %v, saw %v", exp1, act1) -// } -// -// fsm.applySet("/b/bb", map[string]interface{}{"b": 1}, false) -// -// val2, err := fsm.Get("/b") -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp2 := map[string]interface{}{"bb": map[string]interface{}{"b": 1}} -// act2 := val2.(map[string]interface{}) -// if !reflect.DeepEqual(exp2, act2) { -// t.Fatalf("expected content to see %v, saw %v", exp2, act2) -// } -// -// fsm.applySet("/", map[string]interface{}{"a": 1}, false) -// -// val3, err := fsm.Get("/") -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// exp3 := map[string]interface{}{"a": 1} -// act3 := val3 -// if !reflect.DeepEqual(exp3, act3) { -// t.Fatalf("expected content to see %v, saw %v", exp3, act3) -// } -// -// fsm.applySet("/", map[string]interface{}{"b": 2}, true) -// -// val4, err := fsm.Get("/") -// if 
err != nil { -// t.Fatalf("%v", err) -// } -// -// exp4 := map[string]interface{}{"a": 1, "b": 2} -// act4 := val4 -// if !reflect.DeepEqual(exp4, act4) { -// t.Fatalf("expected content to see %v, saw %v", exp4, act4) -// } -//} diff --git a/marshaler/marshaler.go b/marshaler/marshaler.go new file mode 100644 index 0000000..22c615c --- /dev/null +++ b/marshaler/marshaler.go @@ -0,0 +1,186 @@ +package marshaler + +import ( + "bufio" + "bytes" + "encoding/json" + "io" + "io/ioutil" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" +) + +var ( + DefaultContentType = "application/json" +) + +type BlastMarshaler struct{} + +func (*BlastMarshaler) ContentType() string { + return DefaultContentType +} + +func (m *BlastMarshaler) Marshal(v interface{}) ([]byte, error) { + switch v.(type) { + case *protobuf.GetResponse: + var fields map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.GetResponse).Fields, &fields); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "fields": fields, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.SearchResponse: + var searchResult map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.SearchResponse).SearchResult, &searchResult); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "search_result": searchResult, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MappingResponse: + var m map[string]interface{} + if err := json.Unmarshal(v.(*protobuf.MappingResponse).Mapping, &m); err != nil { + return nil, err + } + resp := map[string]interface{}{ + "mapping": m, + } + if value, err := json.Marshal(resp); err == nil { + return value, nil + } else { + return nil, err + } + case *protobuf.MetricsResponse: + value := v.(*protobuf.MetricsResponse).Metrics 
+ return value, nil + default: + return json.Marshal(v) + } +} + +func (m *BlastMarshaler) Unmarshal(data []byte, v interface{}) error { + switch v.(type) { + case *protobuf.SetRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + if i, ok := m["id"].(string); ok { + v.(*protobuf.SetRequest).Id = i + } + + if f, ok := m["fields"].(map[string]interface{}); ok { + fieldsBytes, err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SetRequest).Fields = fieldsBytes + } + return nil + case *protobuf.BulkIndexRequest: + v.(*protobuf.BulkIndexRequest).Requests = make([]*protobuf.SetRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.SetRequest{} + if err := m.Unmarshal(docBytes, r); err != nil { + continue + } + v.(*protobuf.BulkIndexRequest).Requests = append(v.(*protobuf.BulkIndexRequest).Requests, r) + } + } + return nil + case *protobuf.BulkDeleteRequest: + v.(*protobuf.BulkDeleteRequest).Requests = make([]*protobuf.DeleteRequest, 0) + + reader := bufio.NewReader(bytes.NewReader(data)) + for { + docBytes, err := reader.ReadBytes('\n') + if err != nil { + if err == io.EOF || err == io.ErrClosedPipe { + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + break + } + } + if len(docBytes) > 0 { + r := &protobuf.DeleteRequest{ + Id: strings.TrimSpace(string(docBytes)), + } + v.(*protobuf.BulkDeleteRequest).Requests = 
append(v.(*protobuf.BulkDeleteRequest).Requests, r) + } + } + return nil + case *protobuf.SearchRequest: + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return err + } + f, ok := m["search_request"] + if !ok { + return errors.ErrNil + } + searchRequestBytes, err := json.Marshal(f) + if err != nil { + return err + } + v.(*protobuf.SearchRequest).SearchRequest = searchRequestBytes + return nil + default: + return json.Unmarshal(data, v) + } +} + +func (m *BlastMarshaler) NewDecoder(r io.Reader) runtime.Decoder { + return runtime.DecoderFunc( + func(v interface{}) error { + buffer, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + return m.Unmarshal(buffer, v) + }, + ) +} + +func (m *BlastMarshaler) NewEncoder(w io.Writer) runtime.Encoder { + return json.NewEncoder(w) +} + +func (m *BlastMarshaler) Delimiter() []byte { + return []byte("\n") +} diff --git a/marshaler/util.go b/marshaler/util.go new file mode 100644 index 0000000..e935b8b --- /dev/null +++ b/marshaler/util.go @@ -0,0 +1,69 @@ +package marshaler + +import ( + "encoding/json" + "reflect" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/registry" +) + +func init() { + registry.RegisterType("protobuf.LivenessCheckResponse", reflect.TypeOf(protobuf.LivenessCheckResponse{})) + registry.RegisterType("protobuf.ReadinessCheckResponse", reflect.TypeOf(protobuf.ReadinessCheckResponse{})) + registry.RegisterType("protobuf.Metadata", reflect.TypeOf(protobuf.Metadata{})) + registry.RegisterType("protobuf.Node", reflect.TypeOf(protobuf.Node{})) + registry.RegisterType("protobuf.Cluster", reflect.TypeOf(protobuf.Cluster{})) + registry.RegisterType("protobuf.JoinRequest", reflect.TypeOf(protobuf.JoinRequest{})) + registry.RegisterType("protobuf.LeaveRequest", reflect.TypeOf(protobuf.LeaveRequest{})) + registry.RegisterType("protobuf.NodeResponse", reflect.TypeOf(protobuf.NodeResponse{})) + 
registry.RegisterType("protobuf.ClusterResponse", reflect.TypeOf(protobuf.ClusterResponse{})) + registry.RegisterType("protobuf.GetRequest", reflect.TypeOf(protobuf.GetRequest{})) + registry.RegisterType("protobuf.GetResponse", reflect.TypeOf(protobuf.GetResponse{})) + registry.RegisterType("protobuf.SetRequest", reflect.TypeOf(protobuf.SetRequest{})) + registry.RegisterType("protobuf.DeleteRequest", reflect.TypeOf(protobuf.DeleteRequest{})) + registry.RegisterType("protobuf.BulkIndexRequest", reflect.TypeOf(protobuf.BulkIndexRequest{})) + registry.RegisterType("protobuf.BulkDeleteRequest", reflect.TypeOf(protobuf.BulkDeleteRequest{})) + registry.RegisterType("protobuf.SetMetadataRequest", reflect.TypeOf(protobuf.SetMetadataRequest{})) + registry.RegisterType("protobuf.DeleteMetadataRequest", reflect.TypeOf(protobuf.DeleteMetadataRequest{})) + registry.RegisterType("protobuf.Event", reflect.TypeOf(protobuf.Event{})) + registry.RegisterType("protobuf.WatchResponse", reflect.TypeOf(protobuf.WatchResponse{})) + registry.RegisterType("protobuf.MetricsResponse", reflect.TypeOf(protobuf.MetricsResponse{})) + registry.RegisterType("protobuf.Document", reflect.TypeOf(protobuf.Document{})) + registry.RegisterType("map[string]interface {}", reflect.TypeOf((map[string]interface{})(nil))) +} + +func MarshalAny(message *any.Any) (interface{}, error) { + if message == nil { + return nil, nil + } + + typeUrl := message.TypeUrl + value := message.Value + + instance := registry.TypeInstanceByName(typeUrl) + + if err := json.Unmarshal(value, instance); err != nil { + return nil, err + } else { + return instance, nil + } + +} + +func UnmarshalAny(instance interface{}, message *any.Any) error { + if instance == nil { + return nil + } + + value, err := json.Marshal(instance) + if err != nil { + return err + } + + message.TypeUrl = registry.TypeNameByInstance(instance) + message.Value = value + + return nil +} diff --git a/marshaler/util_test.go b/marshaler/util_test.go new file mode 
100644 index 0000000..da72cd4 --- /dev/null +++ b/marshaler/util_test.go @@ -0,0 +1,109 @@ +package marshaler + +import ( + "bytes" + "testing" + + "github.com/golang/protobuf/ptypes/any" + "github.com/mosuka/blast/protobuf" +) + +func TestMarshalAny(t *testing.T) { + // test map[string]interface{} + data := map[string]interface{}{"a": 1, "b": 2, "c": 3} + + mapAny := &any.Any{} + err := UnmarshalAny(data, mapAny) + if err != nil { + t.Errorf("%v", err) + } + + expectedType := "map[string]interface {}" + actualType := mapAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue := []byte(`{"a":1,"b":2,"c":3}`) + actualValue := mapAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } + + // test kvs.Node + node := &protobuf.Node{ + RaftAddress: ":7000", + State: "Leader", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + nodeAny := &any.Any{} + err = UnmarshalAny(node, nodeAny) + if err != nil { + t.Errorf("%v", err) + } + + expectedType = "protobuf.Node" + actualType = nodeAny.TypeUrl + if expectedType != actualType { + t.Errorf("expected content to see %s, saw %s", expectedType, actualType) + } + + expectedValue = []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`) + actualValue = nodeAny.Value + if !bytes.Equal(expectedValue, actualValue) { + t.Errorf("expected content to see %v, saw %v", expectedValue, actualValue) + } +} + +func TestUnmarshalAny(t *testing.T) { + // test map[string]interface{} + dataAny := &any.Any{ + TypeUrl: "map[string]interface {}", + Value: []byte(`{"a":1,"b":2,"c":3}`), + } + + data, err := MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + dataMap := *data.(*map[string]interface{}) + + if dataMap["a"] != float64(1) { + t.Errorf("expected content to see %v, 
saw %v", 1, dataMap["a"]) + } + if dataMap["b"] != float64(2) { + t.Errorf("expected content to see %v, saw %v", 2, dataMap["b"]) + } + if dataMap["c"] != float64(3) { + t.Errorf("expected content to see %v, saw %v", 3, dataMap["c"]) + } + + // raft.Node + dataAny = &any.Any{ + TypeUrl: "protobuf.Node", + Value: []byte(`{"raft_address":":7000","metadata":{"grpc_address":":9000","http_address":":8000"},"state":"Leader"}`), + } + + data, err = MarshalAny(dataAny) + if err != nil { + t.Errorf("%v", err) + } + node := data.(*protobuf.Node) + + if node.RaftAddress != ":7000" { + t.Errorf("expected content to see %v, saw %v", ":7000", node.RaftAddress) + } + if node.Metadata.GrpcAddress != ":9000" { + t.Errorf("expected content to see %v, saw %v", ":9000", node.Metadata.GrpcAddress) + } + if node.Metadata.HttpAddress != ":8000" { + t.Errorf("expected content to see %v, saw %v", ":8000", node.Metadata.HttpAddress) + } + if node.State != "Leader" { + t.Errorf("expected content to see %v, saw %v", "Leader", node.State) + } +} diff --git a/metric/metric.go b/metric/metric.go new file mode 100644 index 0000000..9e6ba20 --- /dev/null +++ b/metric/metric.go @@ -0,0 +1,895 @@ +package metric + +import ( + grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // Create a metrics registry. + Registry = prometheus.NewRegistry() + + // Create some standard server metrics. + GrpcMetrics = grpcprometheus.NewServerMetrics( + func(o *prometheus.CounterOpts) { + o.Namespace = "blast" + }, + ) + + // Raft node state metric + RaftStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "state", + Help: "Node state. 
0:Follower, 1:Candidate, 2:Leader, 3:Shutdown", + }, []string{"id"}) + + RaftTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "term", + Help: "Term.", + }, []string{"id"}) + + RaftLastLogIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_index", + Help: "Last log index.", + }, []string{"id"}) + + RaftLastLogTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_log_term", + Help: "Last log term.", + }, []string{"id"}) + + RaftCommitIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "commit_index", + Help: "Commit index.", + }, []string{"id"}) + + RaftAppliedIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "applied_index", + Help: "Applied index.", + }, []string{"id"}) + + RaftFsmPendingMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "fsm_pending", + Help: "FSM pending.", + }, []string{"id"}) + + RaftLastSnapshotIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_index", + Help: "Last snapshot index.", + }, []string{"id"}) + + RaftLastSnapshotTermMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_snapshot_term", + Help: "Last snapshot term.", + }, []string{"id"}) + + RaftLatestConfigurationIndexMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "latest_configuration_index", + Help: "Latest configuration index.", + }, []string{"id"}) + + RaftNumPeersMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_peers", + Help: "Number of peers.", + }, []string{"id"}) + + 
RaftLastContactMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "last_copntact", + Help: "Last contact.", + }, []string{"id"}) + + RaftNumNodesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "raft", + Name: "num_nodes", + Help: "Number of nodes.", + }, []string{"id"}) + + IndexCurOnDiskBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_bytes", + Help: "cur_on_disk_bytes", + }, []string{"id"}) + + IndexCurOnDiskFilesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_on_disk_files", + Help: "cur_on_disk_files", + }, []string{"id"}) + + IndexCurRootEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "cur_root_epoch", + Help: "cur_root_epoch", + }, []string{"id"}) + + IndexLastMergedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_merged_epoch", + Help: "last_merged_epoch", + }, []string{"id"}) + + IndexLastPersistedEpochMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "last_persisted_epoch", + Help: "last_persisted_epoch", + }, []string{"id"}) + + IndexMaxBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_batch_intro_time", + Help: "max_batch_intro_time", + }, []string{"id"}) + + IndexMaxFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_file_merge_zap_time", + Help: "max_file_merge_zap_time", + }, []string{"id"}) + + IndexMaxMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "max_mem_merge_zap_time", + Help: "max_mem_merge_zap_time", + }, 
[]string{"id"}) + + IndexTotAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_analysis_time", + Help: "tot_analysis_time", + }, []string{"id"}) + + IndexTotBatchIntroTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batch_intro_time", + Help: "tot_batch_intro_time", + }, []string{"id"}) + + IndexTotBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches", + Help: "tot_batches", + }, []string{"id"}) + + IndexTotBatchesEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_batches_empty", + Help: "tot_batches_empty", + }, []string{"id"}) + + IndexTotDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_deletes", + Help: "tot_deletes", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions", + Help: "tot_file_merge_introductions", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_done", + Help: "tot_file_merge_introductions_done", + }, []string{"id"}) + + IndexTotFileMergeIntroductionsSkippedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_introductions_skipped", + Help: "tot_file_merge_introductions_skipped", + }, []string{"id"}) + + IndexTotFileMergeLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_beg", + Help: "tot_file_merge_loop_beg", + }, []string{"id"}) + + IndexTotFileMergeLoopEndMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_end", + Help: "tot_file_merge_loop_end", + }, []string{"id"}) + + IndexTotFileMergeLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_loop_err", + Help: "tot_file_merge_loop_err", + }, []string{"id"}) + + IndexTotFileMergePlanMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan", + Help: "tot_file_merge_plan", + }, []string{"id"}) + + IndexTotFileMergePlanErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_err", + Help: "tot_file_merge_plan_err", + }, []string{"id"}) + + IndexTotFileMergePlanNoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_none", + Help: "tot_file_merge_plan_none", + }, []string{"id"}) + + IndexTotFileMergePlanOkMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_ok", + Help: "tot_file_merge_plan_ok", + }, []string{"id"}) + + IndexTotFileMergePlanTasksMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks", + Help: "tot_file_merge_plan_tasks", + }, []string{"id"}) + + IndexTotFileMergePlanTasksDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_done", + Help: "tot_file_merge_plan_tasks_done", + }, []string{"id"}) + + IndexTotFileMergePlanTasksErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_err", + Help: "tot_file_merge_plan_tasks_err", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments", + Help: "tot_file_merge_plan_tasks_segments", + }, []string{"id"}) + + IndexTotFileMergePlanTasksSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_plan_tasks_segments_empty", + Help: "tot_file_merge_plan_tasks_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments", + Help: "tot_file_merge_segments", + }, []string{"id"}) + + IndexTotFileMergeSegmentsEmptyMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_segments_empty", + Help: "tot_file_merge_segments_empty", + }, []string{"id"}) + + IndexTotFileMergeWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_written_bytes", + Help: "tot_file_merge_written_bytes", + }, []string{"id"}) + + IndexTotFileMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_beg", + Help: "tot_file_merge_zap_beg", + }, []string{"id"}) + + IndexTotFileMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_end", + Help: "tot_file_merge_zap_end", + }, []string{"id"}) + + IndexTotFileMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_merge_zap_time", + Help: "tot_file_merge_zap_time", + }, []string{"id"}) + + IndexTotFileSegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_file_segments_at_root", + Help: "tot_file_segments_at_root", 
+ }, []string{"id"}) + + IndexTotIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_index_time", + Help: "tot_index_time", + }, []string{"id"}) + + IndexTotIndexedPlainTextBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_indexed_plain_text_bytes", + Help: "tot_indexed_plain_text_bytes", + }, []string{"id"}) + + IndexTotIntroduceLoopMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_loop", + Help: "tot_introduce_loop", + }, []string{"id"}) + + IndexTotIntroduceMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_beg", + Help: "tot_introduce_merge_beg", + }, []string{"id"}) + + IndexTotIntroduceMergeEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_merge_end", + Help: "tot_introduce_merge_end", + }, []string{"id"}) + + IndexTotIntroducePersistBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_beg", + Help: "tot_introduce_persist_beg", + }, []string{"id"}) + + IndexTotIntroducePersistEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_persist_end", + Help: "tot_introduce_persist_end", + }, []string{"id"}) + + IndexTotIntroduceRevertBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_beg", + Help: "tot_introduce_revert_beg", + }, []string{"id"}) + + IndexTotIntroduceRevertEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_revert_end", + Help: "tot_introduce_revert_end", + }, []string{"id"}) + + 
IndexTotIntroduceSegmentBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_beg", + Help: "tot_introduce_segment_beg", + }, []string{"id"}) + + IndexTotIntroduceSegmentEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduce_segment_end", + Help: "tot_introduce_segment_end", + }, []string{"id"}) + + IndexTotIntroducedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_items", + Help: "tot_introduced_items", + }, []string{"id"}) + + IndexTotIntroducedSegmentsBatchMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_batch", + Help: "tot_introduced_segments_batch", + }, []string{"id"}) + + IndexTotIntroducedSegmentsMergeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_introduced_segments_merge", + Help: "tot_introduced_segments_merge", + }, []string{"id"}) + + IndexTotItemsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_items_to_persist", + Help: "tot_items_to_persist", + }, []string{"id"}) + + IndexTotMemMergeBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_beg", + Help: "tot_mem_merge_beg", + }, []string{"id"}) + + IndexTotMemMergeDoneMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_done", + Help: "tot_mem_merge_done", + }, []string{"id"}) + + IndexTotMemMergeErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_err", + Help: "tot_mem_merge_err", + }, []string{"id"}) + + IndexTotMemMergeSegmentsMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_segments", + Help: "tot_mem_merge_segments", + }, []string{"id"}) + + IndexTotMemMergeZapBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_beg", + Help: "tot_mem_merge_zap_beg", + }, []string{"id"}) + + IndexTotMemMergeZapEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_end", + Help: "tot_mem_merge_zap_end", + }, []string{"id"}) + + IndexTotMemMergeZapTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_mem_merge_zap_time", + Help: "tot_mem_merge_zap_time", + }, []string{"id"}) + + IndexTotMemorySegmentsAtRootMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_memory_segments_at_root", + Help: "tot_memory_segments_at_root", + }, []string{"id"}) + + IndexTotOnErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_on_errors", + Help: "tot_on_errors", + }, []string{"id"}) + + IndexTotPersistLoopBegMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_beg", + Help: "tot_persist_loop_beg", + }, []string{"id"}) + + IndexTotPersistLoopEndMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_end", + Help: "tot_persist_loop_end", + }, []string{"id"}) + + IndexTotPersistLoopErrMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_err", + Help: "tot_persist_loop_err", + }, []string{"id"}) + + IndexTotPersistLoopProgressMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: 
"tot_persist_loop_progress", + Help: "tot_persist_loop_progress", + }, []string{"id"}) + + IndexTotPersistLoopWaitMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait", + Help: "tot_persist_loop_wait", + }, []string{"id"}) + + IndexTotPersistLoopWaitNotifiedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persist_loop_wait_notified", + Help: "tot_persist_loop_wait_notified", + }, []string{"id"}) + + IndexTotPersistedItemsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_items", + Help: "tot_persisted_items", + }, []string{"id"}) + + IndexTotPersistedSegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persisted_segments", + Help: "tot_persisted_segments", + }, []string{"id"}) + + IndexTotPersisterMergerNapBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_merger_nap_break", + Help: "tot_persister_merger_nap_break", + }, []string{"id"}) + + IndexTotPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_nap_pause_completed", + Help: "tot_persister_nap_pause_completed", + }, []string{"id"}) + + IndexTotPersisterSlowMergerPauseMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_pause", + Help: "tot_persister_slow_merger_pause", + }, []string{"id"}) + + IndexTotPersisterSlowMergerResumeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_persister_slow_merger_resume", + Help: "tot_persister_slow_merger_resume", + }, []string{"id"}) + + IndexTotTermSearchersFinishedMetric = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_finished", + Help: "tot_term_searchers_finished", + }, []string{"id"}) + + IndexTotTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_term_searchers_started", + Help: "tot_term_searchers_started", + }, []string{"id"}) + + IndexTotUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "tot_updates", + Help: "tot_updates", + }, []string{"id"}) + + IndexAnalysisTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "analysis_time", + Help: "analysis_time", + }, []string{"id"}) + + IndexBatchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "batches", + Help: "batches", + }, []string{"id"}) + + IndexDeletesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "deletes", + Help: "deletes", + }, []string{"id"}) + + IndexErrorsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "errors", + Help: "errors", + }, []string{"id"}) + + IndexIndexTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "index_time", + Help: "index_time", + }, []string{"id"}) + + IndexNumBytesUsedDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_bytes_used_disk", + Help: "num_bytes_used_disk", + }, []string{"id"}) + + IndexNumFilesOnDiskMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_files_on_disk", + Help: "num_files_on_disk", + }, []string{"id"}) + + IndexNumItemsIntroducedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "num_items_introduced", + Help: "num_items_introduced", + }, []string{"id"}) + + IndexNumItemsPersistedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_items_persisted", + Help: "num_items_persisted", + }, []string{"id"}) + + IndexNumPersisterNapMergerBreakMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_merger_break", + Help: "num_persister_nap_merger_break", + }, []string{"id"}) + + IndexNumPersisterNapPauseCompletedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_persister_nap_pause_completed", + Help: "num_persister_nap_pause_completed", + }, []string{"id"}) + + IndexNumPlainTextBytesIndexedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_plain_text_bytes_indexed", + Help: "num_plain_text_bytes_indexed", + }, []string{"id"}) + + IndexNumRecsToPersistMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_recs_to_persist", + Help: "num_recs_to_persist", + }, []string{"id"}) + + IndexNumRootFilesegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_filesegments", + Help: "num_root_filesegments", + }, []string{"id"}) + + IndexNumRootMemorysegmentsMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "num_root_memorysegments", + Help: "num_root_memorysegments", + }, []string{"id"}) + + IndexTermSearchersFinishedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "term_searchers_finished", + Help: "term_searchers_finished", + }, []string{"id"}) + + IndexTermSearchersStartedMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + 
Subsystem: "index", + Name: "term_searchers_started", + Help: "term_searchers_started", + }, []string{"id"}) + + IndexTotalCompactionWrittenBytesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "total_compaction_written_bytes", + Help: "total_compaction_written_bytes", + }, []string{"id"}) + + IndexUpdatesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "updates", + Help: "updates", + }, []string{"id"}) + + SearchTimeMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "search_time", + Help: "search_time", + }, []string{"id"}) + + SearchesMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "blast", + Subsystem: "index", + Name: "searches", + Help: "searches", + }, []string{"id"}) +) + +func init() { + // Register standard server metrics and customized metrics to registry. + Registry.MustRegister( + GrpcMetrics, + RaftStateMetric, + RaftTermMetric, + RaftLastLogIndexMetric, + RaftLastLogTermMetric, + RaftCommitIndexMetric, + RaftAppliedIndexMetric, + RaftFsmPendingMetric, + RaftLastSnapshotIndexMetric, + RaftLastSnapshotTermMetric, + RaftLatestConfigurationIndexMetric, + RaftNumPeersMetric, + RaftLastContactMetric, + RaftNumNodesMetric, + IndexCurOnDiskBytesMetric, + IndexCurOnDiskFilesMetric, + IndexCurRootEpochMetric, + IndexLastMergedEpochMetric, + IndexLastPersistedEpochMetric, + IndexMaxBatchIntroTimeMetric, + IndexMaxFileMergeZapTimeMetric, + IndexMaxMemMergeZapTimeMetric, + IndexTotAnalysisTimeMetric, + IndexTotBatchIntroTimeMetric, + IndexTotBatchesMetric, + IndexTotBatchesEmptyMetric, + IndexTotDeletesMetric, + IndexTotFileMergeIntroductionsMetric, + IndexTotFileMergeIntroductionsDoneMetric, + IndexTotFileMergeIntroductionsSkippedMetric, + IndexTotFileMergeLoopBegMetric, + IndexTotFileMergeLoopEndMetric, + IndexTotFileMergeLoopErrMetric, + IndexTotFileMergePlanMetric, + 
IndexTotFileMergePlanErrMetric, + IndexTotFileMergePlanNoneMetric, + IndexTotFileMergePlanOkMetric, + IndexTotFileMergePlanTasksMetric, + IndexTotFileMergePlanTasksDoneMetric, + IndexTotFileMergePlanTasksErrMetric, + IndexTotFileMergePlanTasksSegmentsMetric, + IndexTotFileMergePlanTasksSegmentsEmptyMetric, + IndexTotFileMergeSegmentsMetric, + IndexTotFileMergeSegmentsEmptyMetric, + IndexTotFileMergeWrittenBytesMetric, + IndexTotFileMergeZapBegMetric, + IndexTotFileMergeZapEndMetric, + IndexTotFileMergeZapTimeMetric, + IndexTotFileSegmentsAtRootMetric, + IndexTotIndexTimeMetric, + IndexTotIndexedPlainTextBytesMetric, + IndexTotIntroduceLoopMetric, + IndexTotIntroduceMergeBegMetric, + IndexTotIntroduceMergeEndMetric, + IndexTotIntroducePersistBegMetric, + IndexTotIntroducePersistEndMetric, + IndexTotIntroduceRevertBegMetric, + IndexTotIntroduceRevertEndMetric, + IndexTotIntroduceSegmentBegMetric, + IndexTotIntroduceSegmentEndMetric, + IndexTotIntroducedItemsMetric, + IndexTotIntroducedSegmentsBatchMetric, + IndexTotIntroducedSegmentsMergeMetric, + IndexTotItemsToPersistMetric, + IndexTotMemMergeBegMetric, + IndexTotMemMergeDoneMetric, + IndexTotMemMergeErrMetric, + IndexTotMemMergeSegmentsMetric, + IndexTotMemMergeZapBegMetric, + IndexTotMemMergeZapEndMetric, + IndexTotMemMergeZapTimeMetric, + IndexTotMemorySegmentsAtRootMetric, + IndexTotOnErrorsMetric, + IndexTotPersistLoopBegMetric, + IndexTotPersistLoopEndMetric, + IndexTotPersistLoopErrMetric, + IndexTotPersistLoopProgressMetric, + IndexTotPersistLoopWaitMetric, + IndexTotPersistLoopWaitNotifiedMetric, + IndexTotPersistedItemsMetric, + IndexTotPersistedSegmentsMetric, + IndexTotPersisterMergerNapBreakMetric, + IndexTotPersisterNapPauseCompletedMetric, + IndexTotPersisterSlowMergerPauseMetric, + IndexTotPersisterSlowMergerResumeMetric, + IndexTotTermSearchersFinishedMetric, + IndexTotTermSearchersStartedMetric, + IndexTotUpdatesMetric, + IndexAnalysisTimeMetric, + IndexBatchesMetric, + IndexDeletesMetric, + 
IndexErrorsMetric, + IndexIndexTimeMetric, + IndexNumBytesUsedDiskMetric, + IndexNumFilesOnDiskMetric, + IndexNumItemsIntroducedMetric, + IndexNumItemsPersistedMetric, + IndexNumPersisterNapMergerBreakMetric, + IndexNumPersisterNapPauseCompletedMetric, + IndexNumPlainTextBytesIndexedMetric, + IndexNumRecsToPersistMetric, + IndexNumRootFilesegmentsMetric, + IndexNumRootMemorysegmentsMetric, + IndexTermSearchersFinishedMetric, + IndexTermSearchersStartedMetric, + IndexTotalCompactionWrittenBytesMetric, + IndexUpdatesMetric, + SearchTimeMetric, + SearchesMetric, + ) + GrpcMetrics.EnableHandlingTimeHistogram( + func(o *prometheus.HistogramOpts) { + o.Namespace = "blast" + }, + ) +} diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go deleted file mode 100644 index a942d09..0000000 --- a/protobuf/distribute/distribute.pb.go +++ /dev/null @@ -1,945 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/distribute/distribute.proto - -package distribute - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - index "github.com/mosuka/blast/protobuf/index" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) 
EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{1, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=distribute.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = 
NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type GetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{2} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type GetResponse struct { - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{3} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexRequest) Reset() { *m = IndexRequest{} } -func (m *IndexRequest) String() string 
{ return proto.CompactTextString(m) } -func (*IndexRequest) ProtoMessage() {} -func (*IndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{4} -} - -func (m *IndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexRequest.Unmarshal(m, b) -} -func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) -} -func (m *IndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexRequest.Merge(m, src) -} -func (m *IndexRequest) XXX_Size() int { - return xxx_messageInfo_IndexRequest.Size(m) -} -func (m *IndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexRequest proto.InternalMessageInfo - -func (m *IndexRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexRequest) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type DeleteRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{5} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m 
*DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type BulkIndexRequest struct { - Documents []*index.Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } -func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } -func (*BulkIndexRequest) ProtoMessage() {} -func (*BulkIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{6} -} - -func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) -} -func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) -} -func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexRequest.Merge(m, src) -} -func (m *BulkIndexRequest) XXX_Size() int { - return xxx_messageInfo_BulkIndexRequest.Size(m) -} -func (m *BulkIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo - -func (m *BulkIndexRequest) GetDocuments() []*index.Document { - if m != nil { - return m.Documents - } - return nil -} - -type BulkIndexResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } -func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } -func 
(*BulkIndexResponse) ProtoMessage() {} -func (*BulkIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{7} -} - -func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) -} -func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) -} -func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexResponse.Merge(m, src) -} -func (m *BulkIndexResponse) XXX_Size() int { - return xxx_messageInfo_BulkIndexResponse.Size(m) -} -func (m *BulkIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo - -func (m *BulkIndexResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type BulkDeleteRequest struct { - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } -func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteRequest) ProtoMessage() {} -func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{8} -} - -func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) -} -func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) -} -func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteRequest.Merge(m, src) -} -func (m *BulkDeleteRequest) XXX_Size() int { - return xxx_messageInfo_BulkDeleteRequest.Size(m) -} -func (m *BulkDeleteRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo - -func (m *BulkDeleteRequest) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -type BulkDeleteResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } -func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteResponse) ProtoMessage() {} -func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{9} -} - -func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) -} -func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) -} -func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteResponse.Merge(m, src) -} -func (m *BulkDeleteResponse) XXX_Size() int { - return xxx_messageInfo_BulkDeleteResponse.Size(m) -} -func (m *BulkDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo - -func (m *BulkDeleteResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type SearchRequest struct { - SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } 
-func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{10} -} - -func (m *SearchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) -} -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) -} -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo - -func (m *SearchRequest) GetSearchRequest() *any.Any { - if m != nil { - return m.SearchRequest - } - return nil -} - -type SearchResponse struct { - SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0b1b3e8a99d31c9c, []int{11} -} - -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) -} -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) -} -func (m *SearchResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_SearchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo - -func (m *SearchResponse) GetSearchResult() *any.Any { - if m != nil { - return m.SearchResult - } - return nil -} - -func init() { - proto.RegisterEnum("distribute.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") - proto.RegisterType((*GetRequest)(nil), "distribute.GetRequest") - proto.RegisterType((*GetResponse)(nil), "distribute.GetResponse") - proto.RegisterType((*IndexRequest)(nil), "distribute.IndexRequest") - proto.RegisterType((*DeleteRequest)(nil), "distribute.DeleteRequest") - proto.RegisterType((*BulkIndexRequest)(nil), "distribute.BulkIndexRequest") - proto.RegisterType((*BulkIndexResponse)(nil), "distribute.BulkIndexResponse") - proto.RegisterType((*BulkDeleteRequest)(nil), "distribute.BulkDeleteRequest") - proto.RegisterType((*BulkDeleteResponse)(nil), "distribute.BulkDeleteResponse") - proto.RegisterType((*SearchRequest)(nil), "distribute.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "distribute.SearchResponse") -} - -func init() { - proto.RegisterFile("protobuf/distribute/distribute.proto", fileDescriptor_0b1b3e8a99d31c9c) -} - -var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 759 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x4e, 0xdb, 0x48, - 0x18, 0x5d, 0x27, 0x38, 0x90, 0x2f, 0x24, 0xf1, 0xce, 0x42, 0x00, 0x6f, 0xd8, 0x5d, 0x79, 0x77, - 0x25, 0xf0, 0x2e, 0xb6, 0x36, 0xdb, 0x9b, 0x82, 0xda, 0x2a, 0x34, 0x11, 0x20, 0xa2, 0x50, 0x39, - 0x40, 0x05, 0x52, 0x45, 0x9d, 0x78, 0x20, 
0x56, 0x1c, 0x3b, 0x8d, 0xc7, 0x15, 0xa8, 0xea, 0x4d, - 0x5f, 0xa1, 0xb7, 0x7d, 0x93, 0x5e, 0xf6, 0x11, 0xfa, 0x0a, 0x7d, 0x90, 0x6a, 0x66, 0xec, 0xc4, - 0x26, 0x3f, 0xe5, 0x06, 0xf9, 0xfb, 0x3b, 0xe7, 0xcc, 0x7c, 0x67, 0x08, 0xfc, 0x35, 0x18, 0x7a, - 0xc4, 0x6b, 0x07, 0xd7, 0xba, 0x65, 0xfb, 0x64, 0x68, 0xb7, 0x03, 0x82, 0x63, 0x9f, 0x1a, 0x2b, - 0x23, 0x18, 0x67, 0xe4, 0x8d, 0x1b, 0xcf, 0xbb, 0x71, 0xb0, 0x3e, 0x1a, 0x34, 0xdd, 0x3b, 0xde, - 0x26, 0xff, 0x7a, 0xbf, 0x84, 0xfb, 0x03, 0x12, 0x15, 0xe5, 0x51, 0xd6, 0x76, 0x2d, 0x7c, 0xcb, - 0xff, 0x86, 0xb5, 0x72, 0x38, 0x68, 0x0e, 0x6c, 0xdd, 0x74, 0x5d, 0x8f, 0x98, 0xc4, 0xf6, 0x5c, - 0x9f, 0x57, 0x95, 0x4f, 0x02, 0x94, 0x9a, 0x9e, 0x85, 0x0f, 0xb1, 0xe9, 0x90, 0xee, 0xf3, 0x2e, - 0xee, 0xf4, 0x0c, 0xfc, 0x26, 0xc0, 0x3e, 0x41, 0x4f, 0x41, 0x1c, 0x0c, 0xbd, 0x36, 0x5e, 0x17, - 0xfe, 0x10, 0xb6, 0x0a, 0x95, 0x2d, 0x2d, 0x26, 0x7d, 0xfa, 0x88, 0xf6, 0x82, 0xf6, 0x1b, 0x7c, - 0x4c, 0xd9, 0x07, 0x91, 0xc5, 0x28, 0x07, 0x8b, 0x67, 0xcd, 0xe3, 0xe6, 0xc9, 0xcb, 0xa6, 0xf4, - 0x13, 0x2a, 0x42, 0xee, 0xb0, 0x5e, 0x6d, 0x9c, 0x1e, 0x1e, 0x35, 0xeb, 0xad, 0x96, 0x24, 0xa0, - 0x65, 0x58, 0x6a, 0x1c, 0x9d, 0xd7, 0x59, 0x94, 0x42, 0x79, 0xc8, 0x1a, 0xf5, 0x6a, 0x8d, 0x17, - 0xd3, 0xca, 0x67, 0x01, 0xd6, 0x26, 0xb8, 0xfc, 0x81, 0xe7, 0xfa, 0x18, 0x3d, 0x03, 0xd1, 0x27, - 0x26, 0x89, 0xf4, 0x6d, 0xcf, 0xd5, 0xc7, 0x67, 0xb4, 0x16, 0x1d, 0x30, 0xf8, 0x9c, 0x72, 0x05, - 0x22, 0x8b, 0x93, 0x02, 0x73, 0xb0, 0xc8, 0x05, 0x5e, 0x48, 0x02, 0x95, 0x73, 0xd6, 0x8c, 0xc2, - 0x14, 0xca, 0x82, 0x58, 0xa5, 0x62, 0xa5, 0x34, 0x5a, 0x82, 0x85, 0x5a, 0xbd, 0x5a, 0x93, 0x16, - 0x68, 0x92, 0x4a, 0xbe, 0x90, 0x44, 0xda, 0xde, 0x3c, 0x39, 0xbd, 0xe2, 0x61, 0x46, 0x29, 0x03, - 0x1c, 0x60, 0x12, 0xdd, 0x67, 0x01, 0x52, 0xb6, 0xc5, 0xc4, 0x66, 0x8d, 0x94, 0x6d, 0x29, 0x7b, - 0x90, 0x63, 0xd5, 0xf0, 0x38, 0xff, 0x42, 0xe6, 0xda, 0xc6, 0x8e, 0xe5, 0xb3, 0x96, 0x5c, 0x65, - 0x45, 0xe3, 0x8b, 0xd3, 0xa2, 0xdd, 0x6a, 0x55, 0xf7, 0xce, 0x08, 0x7b, 0x94, 
0x06, 0x2c, 0x1f, - 0xd1, 0x25, 0xcf, 0x00, 0x8f, 0xa1, 0xa5, 0x1e, 0x80, 0xf6, 0x3b, 0xe4, 0x6b, 0xd8, 0xc1, 0x04, - 0xcf, 0xd2, 0x5a, 0x05, 0x69, 0x3f, 0x70, 0x7a, 0x09, 0xca, 0x1d, 0xc8, 0x5a, 0x5e, 0x27, 0xe8, - 0x63, 0x97, 0x50, 0xcd, 0xe9, 0xad, 0x5c, 0xa5, 0xa8, 0x71, 0xe7, 0xd5, 0xc2, 0xbc, 0x31, 0xee, - 0x50, 0xb6, 0xe1, 0xe7, 0x18, 0x44, 0x78, 0xe8, 0x15, 0x10, 0x3b, 0x5e, 0xe0, 0x12, 0x46, 0x25, - 0x1a, 0x3c, 0x50, 0xfe, 0xe6, 0xad, 0x49, 0x49, 0x12, 0xa4, 0x6d, 0x8b, 0x13, 0x65, 0x0d, 0xfa, - 0xa9, 0xa8, 0x80, 0xe2, 0x6d, 0x73, 0x21, 0x1b, 0x90, 0x6f, 0x61, 0x73, 0xd8, 0xe9, 0x46, 0x70, - 0x7b, 0x50, 0xf0, 0x59, 0xe2, 0x6a, 0xc8, 0x33, 0x73, 0xaf, 0x3d, 0xef, 0xc7, 0x87, 0x95, 0x63, - 0x28, 0x44, 0x68, 0x21, 0xeb, 0x63, 0xc8, 0x8f, 0xe0, 0xfc, 0xc0, 0x99, 0x8f, 0xb6, 0x1c, 0xa1, - 0xd1, 0xce, 0xca, 0x17, 0x11, 0xa0, 0x36, 0xb2, 0x2e, 0xba, 0x85, 0xe2, 0x3d, 0xf7, 0x22, 0xe5, - 0xc7, 0x4f, 0x4f, 0xfe, 0xf3, 0x01, 0xf6, 0x57, 0xca, 0x1f, 0xbe, 0x7e, 0xfb, 0x98, 0x2a, 0xa1, - 0x15, 0xfd, 0xed, 0x7f, 0xba, 0xeb, 0x59, 0x58, 0xef, 0xb2, 0xae, 0x0e, 0xa3, 0x39, 0x83, 0xf4, - 0x01, 0x26, 0xa8, 0x14, 0x47, 0x1a, 0xfb, 0x57, 0x5e, 0x9b, 0xc8, 0x87, 0xa8, 0x9b, 0x0c, 0x75, - 0x0d, 0xad, 0x52, 0xd4, 0xd1, 0xc2, 0xf5, 0x77, 0xb6, 0xf5, 0x44, 0x55, 0xdf, 0x23, 0x0f, 0x44, - 0xb6, 0x74, 0xb4, 0x1e, 0x07, 0x88, 0x5b, 0x49, 0x2e, 0x4d, 0x5c, 0x53, 0x9d, 0xfe, 0x77, 0x53, - 0x1e, 0x31, 0x64, 0x4d, 0xce, 0x27, 0x90, 0x77, 0x05, 0xf5, 0x52, 0x96, 0xa7, 0xb3, 0xed, 0x0a, - 0x2a, 0xba, 0x84, 0x0c, 0xf7, 0x04, 0xda, 0x88, 0x33, 0x26, 0xec, 0x34, 0x93, 0x32, 0x3c, 0x8c, - 0x3a, 0xe3, 0x30, 0xaf, 0x20, 0x3b, 0x72, 0x31, 0x2a, 0xc7, 0xe1, 0xef, 0xbf, 0x0f, 0x79, 0x73, - 0x46, 0x35, 0xbc, 0xb5, 0x5f, 0x18, 0x51, 0x5e, 0x5e, 0xa2, 0x44, 0xed, 0xc0, 0xe9, 0x51, 0xe9, - 0xaf, 0x01, 0xc6, 0x96, 0x46, 0x13, 0x08, 0xc9, 0x23, 0xfc, 0x36, 0xab, 0x9c, 0x64, 0x50, 0x13, - 0x0c, 0xe7, 0x90, 0xe1, 0xd6, 0x4d, 0x5e, 0x4e, 0xe2, 0x71, 0xc8, 0xf2, 0xb4, 0x52, 0x88, 0xba, - 0xca, 0x50, 0x8b, 
0x0a, 0x50, 0x54, 0x6e, 0xe4, 0x5d, 0x41, 0xdd, 0xdf, 0xb9, 0xfc, 0xe7, 0xc6, - 0x26, 0xdd, 0xa0, 0xad, 0x75, 0xbc, 0xbe, 0xde, 0xf7, 0xfc, 0xa0, 0x67, 0xea, 0x6d, 0xc7, 0xf4, - 0x89, 0x3e, 0xe5, 0x67, 0xb0, 0x9d, 0x61, 0xc9, 0xff, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0xf4, 0x78, 0x1a, 0x24, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DistributeClient is the client API for Distribute service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DistributeClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) - BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) -} - -type distributeClient struct { - cc *grpc.ClientConn -} - -func NewDistributeClient(cc *grpc.ClientConn) DistributeClient { - return &distributeClient{cc} -} - -func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, 
"/distribute.Distribute/NodeHealthCheck", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Index", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { - out := new(BulkIndexResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { - out := new(BulkDeleteResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/BulkDelete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/Search", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DistributeServer is the server API for Distribute service. 
-type DistributeServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - Get(context.Context, *GetRequest) (*GetResponse, error) - Index(context.Context, *IndexRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) - BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) - Search(context.Context, *SearchRequest) (*SearchResponse, error) -} - -// UnimplementedDistributeServer can be embedded to have forward compatible implementations. -type UnimplementedDistributeServer struct { -} - -func (*UnimplementedDistributeServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedDistributeServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedDistributeServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") -} -func (*UnimplementedDistributeServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedDistributeServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") -} -func (*UnimplementedDistributeServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") -} -func (*UnimplementedDistributeServer) Search(ctx context.Context, req 
*SearchRequest) (*SearchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") -} - -func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { - s.RegisterService(&_Distribute_serviceDesc, srv) -} - -func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Index(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Index", - } - handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { - return srv.(DistributeServer).Index(ctx, req.(*IndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).BulkIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/BulkIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).BulkIndex(ctx, req.(*BulkIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).BulkDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/BulkDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).BulkDelete(ctx, 
req.(*BulkDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).Search(ctx, req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Distribute_serviceDesc = grpc.ServiceDesc{ - ServiceName: "distribute.Distribute", - HandlerType: (*DistributeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Distribute_NodeHealthCheck_Handler, - }, - { - MethodName: "Get", - Handler: _Distribute_Get_Handler, - }, - { - MethodName: "Index", - Handler: _Distribute_Index_Handler, - }, - { - MethodName: "Delete", - Handler: _Distribute_Delete_Handler, - }, - { - MethodName: "BulkIndex", - Handler: _Distribute_BulkIndex_Handler, - }, - { - MethodName: "BulkDelete", - Handler: _Distribute_BulkDelete_Handler, - }, - { - MethodName: "Search", - Handler: _Distribute_Search_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "protobuf/distribute/distribute.proto", -} diff --git a/protobuf/distribute/distribute.pb.gw.go b/protobuf/distribute/distribute.pb.gw.go deleted file mode 100644 index e540253..0000000 --- a/protobuf/distribute/distribute.pb.gw.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: protobuf/distribute/distribute.proto - -/* -Package distribute is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package distribute - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Distribute_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Distribute_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Distribute_NodeHealthCheck_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Get_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", 
err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Index_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Index_1(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return 
msg, metadata, err - -} - -func request_Distribute_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkIndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkDeleteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Distribute_Search_0(ctx context.Context, marshaler runtime.Marshaler, client DistributeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SearchRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterDistributeHandlerFromEndpoint is same as RegisterDistributeHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterDistributeHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterDistributeHandler(ctx, mux, conn) -} - -// RegisterDistributeHandler registers the http handlers for service Distribute to "mux". 
-// The handlers forward requests to the grpc endpoint over "conn". -func RegisterDistributeHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterDistributeHandlerClient(ctx, mux, NewDistributeClient(conn)) -} - -// RegisterDistributeHandlerClient registers the http handlers for service Distribute -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DistributeClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DistributeClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "DistributeClient" to call the correct interceptors. -func RegisterDistributeHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DistributeClient) error { - - mux.Handle("GET", pattern_Distribute_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Distribute_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Distribute_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Index_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Distribute_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Index_1(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Distribute_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Distribute_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Distribute_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Distribute_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Distribute_Search_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Distribute_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Distribute_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", 
"bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Distribute_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Distribute_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Distribute_Get_0 = runtime.ForwardResponseMessage - - forward_Distribute_Index_0 = runtime.ForwardResponseMessage - - forward_Distribute_Index_1 = runtime.ForwardResponseMessage - - forward_Distribute_Delete_0 = runtime.ForwardResponseMessage - - forward_Distribute_BulkIndex_0 = runtime.ForwardResponseMessage - - forward_Distribute_BulkDelete_0 = runtime.ForwardResponseMessage - - forward_Distribute_Search_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto deleted file mode 100644 index beaf5a6..0000000 --- a/protobuf/distribute/distribute.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "protobuf/index/index.proto"; -import "google/api/annotations.proto"; - -package distribute; - -option go_package = "github.com/mosuka/blast/protobuf/distribute"; - -service Distribute { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/documents/{id=**}" - }; - } - rpc Index (IndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/documents" - body: "*" - additional_bindings { - put: "/v1/documents/{id=**}" - body: "*" - } - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/documents/{id=**}" - }; - } - rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { - option (google.api.http) = { - put: "/v1/bulk" - body: "*" - }; - } - rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { - option (google.api.http) = { - delete: "/v1/bulk" - body: "*" - }; - } - rpc Search (SearchRequest) returns (SearchResponse) { - option (google.api.http) = { - post: "/v1/search" - body: "*" - }; - } -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message GetRequest { - string id = 1; -} - -message GetResponse { - google.protobuf.Any fields = 1; -} - -message IndexRequest { - string id = 1; - google.protobuf.Any fields = 2; -} - -message DeleteRequest { - string id = 1; -} - -message BulkIndexRequest { - repeated index.Document documents = 1; -} - -message BulkIndexResponse { - int32 count = 
1; -} - -message BulkDeleteRequest { - repeated string ids = 1; -} - -message BulkDeleteResponse { - int32 count = 1; -} - -message SearchRequest { - google.protobuf.Any search_request = 1; -} - -message SearchResponse { - google.protobuf.Any search_result = 1; -} diff --git a/protobuf/distribute/distribute.swagger.json b/protobuf/distribute/distribute.swagger.json deleted file mode 100644 index 8ddf64d..0000000 --- a/protobuf/distribute/distribute.swagger.json +++ /dev/null @@ -1,362 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/distribute/distribute.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/bulk": { - "delete": { - "operationId": "BulkDelete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeBulkDeleteResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeBulkDeleteRequest" - } - } - ], - "tags": [ - "Distribute" - ] - }, - "put": { - "operationId": "BulkIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeBulkIndexResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeBulkIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/documents": { - "put": { - "operationId": "Index", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/documents/{id}": { - "get": { - "operationId": "Get", - "responses": { - 
"200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeGetResponse" - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Distribute" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Distribute" - ] - }, - "put": { - "operationId": "Index2", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeIndexRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Distribute" - ] - } - }, - "/v1/search": { - "post": { - "operationId": "Search", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/distributeSearchResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/distributeSearchRequest" - } - } - ], - "tags": [ - "Distribute" - ] - } - } - }, - "definitions": { - "distributeBulkDeleteRequest": { - "type": "object", - "properties": { - "ids": { - "type": "array", - 
"items": { - "type": "string" - } - } - } - }, - "distributeBulkDeleteResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "distributeBulkIndexRequest": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/definitions/indexDocument" - } - } - } - }, - "distributeBulkIndexResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "distributeGetResponse": { - "type": "object", - "properties": { - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeIndexRequest": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeNodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "distributeNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/distributeNodeHealthCheckResponseState" - } - } - }, - "distributeNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "distributeSearchRequest": { - "type": "object", - "properties": { - "search_request": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "distributeSearchResponse": { - "type": "object", - "properties": { - "search_result": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexDocument": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. 
This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." 
- } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - } - } -} diff --git a/protobuf/index.pb.go b/protobuf/index.pb.go new file mode 100644 index 0000000..cddc642 --- /dev/null +++ b/protobuf/index.pb.go @@ -0,0 +1,1913 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: protobuf/index.proto + +package protobuf + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Event_Type int32 + +const ( + Event_Unknown Event_Type = 0 + Event_Join Event_Type = 1 + Event_Leave Event_Type = 2 + Event_Set Event_Type = 3 + Event_Delete Event_Type = 4 + Event_BulkIndex Event_Type = 5 + Event_BulkDelete Event_Type = 6 +) + +var Event_Type_name = map[int32]string{ + 0: "Unknown", + 1: "Join", + 2: "Leave", + 3: "Set", + 4: "Delete", + 5: "BulkIndex", + 6: "BulkDelete", +} + +var Event_Type_value = map[string]int32{ + "Unknown": 0, + "Join": 1, + "Leave": 2, + "Set": 3, + "Delete": 4, + "BulkIndex": 5, + "BulkDelete": 6, +} + +func (x Event_Type) String() string { + return proto.EnumName(Event_Type_name, int32(x)) +} + +func (Event_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{23, 0} +} + +type LivenessCheckResponse struct { + Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LivenessCheckResponse) Reset() { *m = LivenessCheckResponse{} } +func (m *LivenessCheckResponse) String() string { return proto.CompactTextString(m) } +func (*LivenessCheckResponse) ProtoMessage() {} +func (*LivenessCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{0} +} + +func (m *LivenessCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LivenessCheckResponse.Unmarshal(m, b) +} +func (m *LivenessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LivenessCheckResponse.Marshal(b, m, deterministic) +} +func (m *LivenessCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LivenessCheckResponse.Merge(m, src) +} +func (m *LivenessCheckResponse) XXX_Size() int { + return xxx_messageInfo_LivenessCheckResponse.Size(m) +} +func (m *LivenessCheckResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_LivenessCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LivenessCheckResponse proto.InternalMessageInfo + +func (m *LivenessCheckResponse) GetAlive() bool { + if m != nil { + return m.Alive + } + return false +} + +type ReadinessCheckResponse struct { + Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadinessCheckResponse) Reset() { *m = ReadinessCheckResponse{} } +func (m *ReadinessCheckResponse) String() string { return proto.CompactTextString(m) } +func (*ReadinessCheckResponse) ProtoMessage() {} +func (*ReadinessCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{1} +} + +func (m *ReadinessCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadinessCheckResponse.Unmarshal(m, b) +} +func (m *ReadinessCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadinessCheckResponse.Marshal(b, m, deterministic) +} +func (m *ReadinessCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadinessCheckResponse.Merge(m, src) +} +func (m *ReadinessCheckResponse) XXX_Size() int { + return xxx_messageInfo_ReadinessCheckResponse.Size(m) +} +func (m *ReadinessCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadinessCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadinessCheckResponse proto.InternalMessageInfo + +func (m *ReadinessCheckResponse) GetReady() bool { + if m != nil { + return m.Ready + } + return false +} + +type Metadata struct { + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{2} +} + +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetGrpcAddress() string { + if m != nil { + return m.GrpcAddress + } + return "" +} + +func (m *Metadata) GetHttpAddress() string { + if m != nil { + return m.HttpAddress + } + return "" +} + +type Node struct { + RaftAddress string `protobuf:"bytes,1,opt,name=raft_address,json=raftAddress,proto3" json:"raft_address,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{3} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} 
+func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetRaftAddress() string { + if m != nil { + return m.RaftAddress + } + return "" +} + +func (m *Node) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Node) GetState() string { + if m != nil { + return m.State + } + return "" +} + +type Cluster struct { + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{4} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetNodes() map[string]*Node { + if m != nil { + return m.Nodes + } + return nil +} + +func (m *Cluster) GetLeader() string { + if m != nil { + return m.Leader 
+ } + return "" +} + +type JoinRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (m *JoinRequest) String() string { return proto.CompactTextString(m) } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{5} +} + +func (m *JoinRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_JoinRequest.Unmarshal(m, b) +} +func (m *JoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_JoinRequest.Marshal(b, m, deterministic) +} +func (m *JoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_JoinRequest.Merge(m, src) +} +func (m *JoinRequest) XXX_Size() int { + return xxx_messageInfo_JoinRequest.Size(m) +} +func (m *JoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_JoinRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_JoinRequest proto.InternalMessageInfo + +func (m *JoinRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *JoinRequest) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +type LeaveRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (m *LeaveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{6} +} + +func (m *LeaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LeaveRequest.Unmarshal(m, 
b) +} +func (m *LeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LeaveRequest.Marshal(b, m, deterministic) +} +func (m *LeaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaveRequest.Merge(m, src) +} +func (m *LeaveRequest) XXX_Size() int { + return xxx_messageInfo_LeaveRequest.Size(m) +} +func (m *LeaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LeaveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaveRequest proto.InternalMessageInfo + +func (m *LeaveRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type NodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeResponse) Reset() { *m = NodeResponse{} } +func (m *NodeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeResponse) ProtoMessage() {} +func (*NodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{7} +} + +func (m *NodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeResponse.Unmarshal(m, b) +} +func (m *NodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeResponse.Marshal(b, m, deterministic) +} +func (m *NodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResponse.Merge(m, src) +} +func (m *NodeResponse) XXX_Size() int { + return xxx_messageInfo_NodeResponse.Size(m) +} +func (m *NodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeResponse proto.InternalMessageInfo + +func (m *NodeResponse) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +type ClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterResponse) Reset() { *m = ClusterResponse{} } +func (m *ClusterResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterResponse) ProtoMessage() {} +func (*ClusterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{8} +} + +func (m *ClusterResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterResponse.Unmarshal(m, b) +} +func (m *ClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterResponse.Marshal(b, m, deterministic) +} +func (m *ClusterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResponse.Merge(m, src) +} +func (m *ClusterResponse) XXX_Size() int { + return xxx_messageInfo_ClusterResponse.Size(m) +} +func (m *ClusterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResponse proto.InternalMessageInfo + +func (m *ClusterResponse) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type Document struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{9} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo + +func (m *Document) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Document) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type GetRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{10} +} + +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type GetResponse struct { + Fields []byte `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) 
} +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{11} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type SetRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Fields []byte `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetRequest) Reset() { *m = SetRequest{} } +func (m *SetRequest) String() string { return proto.CompactTextString(m) } +func (*SetRequest) ProtoMessage() {} +func (*SetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{12} +} + +func (m *SetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetRequest.Unmarshal(m, b) +} +func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) +} +func (m *SetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetRequest.Merge(m, src) +} +func (m *SetRequest) XXX_Size() int { + return xxx_messageInfo_SetRequest.Size(m) +} +func (m *SetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SetRequest proto.InternalMessageInfo + +func (m *SetRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SetRequest) GetFields() []byte { + if m != nil { + return m.Fields + } + return nil +} + +type DeleteRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{13} +} + +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type BulkIndexRequest struct { + Requests []*SetRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } +func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } +func (*BulkIndexRequest) ProtoMessage() {} +func (*BulkIndexRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_28043ab4bd817113, []int{14} +} + +func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) +} +func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) +} +func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexRequest.Merge(m, src) +} +func (m *BulkIndexRequest) XXX_Size() int { + return xxx_messageInfo_BulkIndexRequest.Size(m) +} +func (m *BulkIndexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo + +func (m *BulkIndexRequest) GetRequests() []*SetRequest { + if m != nil { + return m.Requests + } + return nil +} + +type BulkIndexResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } +func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } +func (*BulkIndexResponse) ProtoMessage() {} +func (*BulkIndexResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{15} +} + +func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) +} +func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) +} +func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkIndexResponse.Merge(m, src) +} +func (m *BulkIndexResponse) XXX_Size() int { + return xxx_messageInfo_BulkIndexResponse.Size(m) +} +func (m *BulkIndexResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo + +func (m *BulkIndexResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type BulkDeleteRequest struct { + Requests []*DeleteRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } +func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteRequest) ProtoMessage() {} +func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{16} +} + +func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) +} +func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) +} +func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteRequest.Merge(m, src) +} +func (m *BulkDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BulkDeleteRequest.Size(m) +} +func (m *BulkDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo + +func (m *BulkDeleteRequest) GetRequests() []*DeleteRequest { + if m != nil { + return m.Requests + } + return nil +} + +type BulkDeleteResponse struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } +func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BulkDeleteResponse) ProtoMessage() {} +func (*BulkDeleteResponse) Descriptor() 
([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{17} +} + +func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) +} +func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) +} +func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BulkDeleteResponse.Merge(m, src) +} +func (m *BulkDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BulkDeleteResponse.Size(m) +} +func (m *BulkDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo + +func (m *BulkDeleteResponse) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type SetMetadataRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMetadataRequest) Reset() { *m = SetMetadataRequest{} } +func (m *SetMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*SetMetadataRequest) ProtoMessage() {} +func (*SetMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{18} +} + +func (m *SetMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMetadataRequest.Unmarshal(m, b) +} +func (m *SetMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMetadataRequest.Marshal(b, m, deterministic) +} +func (m *SetMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMetadataRequest.Merge(m, src) +} +func (m *SetMetadataRequest) XXX_Size() int { + return 
xxx_messageInfo_SetMetadataRequest.Size(m) +} +func (m *SetMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMetadataRequest proto.InternalMessageInfo + +func (m *SetMetadataRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SetMetadataRequest) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type DeleteMetadataRequest struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMetadataRequest) Reset() { *m = DeleteMetadataRequest{} } +func (m *DeleteMetadataRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetadataRequest) ProtoMessage() {} +func (*DeleteMetadataRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{19} +} + +func (m *DeleteMetadataRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMetadataRequest.Unmarshal(m, b) +} +func (m *DeleteMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMetadataRequest.Marshal(b, m, deterministic) +} +func (m *DeleteMetadataRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMetadataRequest.Merge(m, src) +} +func (m *DeleteMetadataRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMetadataRequest.Size(m) +} +func (m *DeleteMetadataRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMetadataRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMetadataRequest proto.InternalMessageInfo + +func (m *DeleteMetadataRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type SearchRequest struct { + SearchRequest []byte `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{20} +} + +func (m *SearchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchRequest.Unmarshal(m, b) +} +func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) +} +func (m *SearchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchRequest.Merge(m, src) +} +func (m *SearchRequest) XXX_Size() int { + return xxx_messageInfo_SearchRequest.Size(m) +} +func (m *SearchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SearchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchRequest proto.InternalMessageInfo + +func (m *SearchRequest) GetSearchRequest() []byte { + if m != nil { + return m.SearchRequest + } + return nil +} + +type SearchResponse struct { + SearchResult []byte `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{21} +} + +func (m *SearchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SearchResponse.Unmarshal(m, b) +} +func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) +} +func (m *SearchResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SearchResponse.Merge(m, src) +} +func (m *SearchResponse) XXX_Size() int { + return xxx_messageInfo_SearchResponse.Size(m) +} +func (m *SearchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SearchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SearchResponse proto.InternalMessageInfo + +func (m *SearchResponse) GetSearchResult() []byte { + if m != nil { + return m.SearchResult + } + return nil +} + +type MappingResponse struct { + Mapping []byte `protobuf:"bytes,1,opt,name=mapping,proto3" json:"mapping,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MappingResponse) Reset() { *m = MappingResponse{} } +func (m *MappingResponse) String() string { return proto.CompactTextString(m) } +func (*MappingResponse) ProtoMessage() {} +func (*MappingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{22} +} + +func (m *MappingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MappingResponse.Unmarshal(m, b) +} +func (m *MappingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MappingResponse.Marshal(b, m, deterministic) +} +func (m *MappingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MappingResponse.Merge(m, src) +} +func (m *MappingResponse) XXX_Size() int { + return xxx_messageInfo_MappingResponse.Size(m) +} +func (m *MappingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MappingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MappingResponse proto.InternalMessageInfo + +func (m *MappingResponse) GetMapping() []byte { + if m != nil { + return m.Mapping + } + return nil +} + +type Event struct { + Type Event_Type `protobuf:"varint,1,opt,name=type,proto3,enum=index.Event_Type" json:"type,omitempty"` + Data *any.Any `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{23} +} + +func (m *Event) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Event.Unmarshal(m, b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return xxx_messageInfo_Event.Size(m) +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() Event_Type { + if m != nil { + return m.Type + } + return Event_Unknown +} + +func (m *Event) GetData() *any.Any { + if m != nil { + return m.Data + } + return nil +} + +type WatchResponse struct { + Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{24} +} + +func (m *WatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WatchResponse.Unmarshal(m, b) +} +func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) +} +func (m *WatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchResponse.Merge(m, src) +} +func (m *WatchResponse) 
XXX_Size() int { + return xxx_messageInfo_WatchResponse.Size(m) +} +func (m *WatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchResponse proto.InternalMessageInfo + +func (m *WatchResponse) GetEvent() *Event { + if m != nil { + return m.Event + } + return nil +} + +type MetricsResponse struct { + Metrics []byte `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } +func (m *MetricsResponse) String() string { return proto.CompactTextString(m) } +func (*MetricsResponse) ProtoMessage() {} +func (*MetricsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_28043ab4bd817113, []int{25} +} + +func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricsResponse.Unmarshal(m, b) +} +func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) +} +func (m *MetricsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsResponse.Merge(m, src) +} +func (m *MetricsResponse) XXX_Size() int { + return xxx_messageInfo_MetricsResponse.Size(m) +} +func (m *MetricsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MetricsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo + +func (m *MetricsResponse) GetMetrics() []byte { + if m != nil { + return m.Metrics + } + return nil +} + +func init() { + proto.RegisterEnum("index.Event_Type", Event_Type_name, Event_Type_value) + proto.RegisterType((*LivenessCheckResponse)(nil), "index.LivenessCheckResponse") + proto.RegisterType((*ReadinessCheckResponse)(nil), "index.ReadinessCheckResponse") + proto.RegisterType((*Metadata)(nil), "index.Metadata") + proto.RegisterType((*Node)(nil), "index.Node") + 
proto.RegisterType((*Cluster)(nil), "index.Cluster") + proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") + proto.RegisterType((*JoinRequest)(nil), "index.JoinRequest") + proto.RegisterType((*LeaveRequest)(nil), "index.LeaveRequest") + proto.RegisterType((*NodeResponse)(nil), "index.NodeResponse") + proto.RegisterType((*ClusterResponse)(nil), "index.ClusterResponse") + proto.RegisterType((*Document)(nil), "index.Document") + proto.RegisterType((*GetRequest)(nil), "index.GetRequest") + proto.RegisterType((*GetResponse)(nil), "index.GetResponse") + proto.RegisterType((*SetRequest)(nil), "index.SetRequest") + proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") + proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") + proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") + proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") + proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") + proto.RegisterType((*SetMetadataRequest)(nil), "index.SetMetadataRequest") + proto.RegisterType((*DeleteMetadataRequest)(nil), "index.DeleteMetadataRequest") + proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") + proto.RegisterType((*MappingResponse)(nil), "index.MappingResponse") + proto.RegisterType((*Event)(nil), "index.Event") + proto.RegisterType((*WatchResponse)(nil), "index.WatchResponse") + proto.RegisterType((*MetricsResponse)(nil), "index.MetricsResponse") +} + +func init() { proto.RegisterFile("protobuf/index.proto", fileDescriptor_28043ab4bd817113) } + +var fileDescriptor_28043ab4bd817113 = []byte{ + // 1166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0xed, 0x6e, 0x1b, 0x45, + 0x17, 0xae, 0xbf, 0xdd, 0xe3, 0x8f, 0xb8, 0xa7, 0x76, 0xea, 0x6c, 0xd3, 0x36, 0x9d, 0x57, 0xd1, + 0x1b, 0x5c, 0x62, 0x97, 0x14, 0x10, 0x04, 
0x81, 0x94, 0xb6, 0x56, 0x05, 0xa4, 0x51, 0xd9, 0x50, + 0x81, 0x00, 0x29, 0x9a, 0x78, 0x27, 0xce, 0x2a, 0xeb, 0xdd, 0x65, 0x77, 0xec, 0x62, 0xa1, 0xfe, + 0xe1, 0x16, 0xf8, 0xc5, 0x85, 0xc0, 0x8d, 0x70, 0x0b, 0x5c, 0x08, 0x9a, 0x8f, 0x5d, 0xef, 0xda, + 0xde, 0x46, 0xfc, 0xf2, 0xce, 0x9c, 0x67, 0x9e, 0xf3, 0xcc, 0x99, 0x99, 0xe7, 0xc8, 0xd0, 0xf6, + 0x03, 0x8f, 0x7b, 0xe7, 0xd3, 0x8b, 0x81, 0xed, 0x5a, 0xec, 0x97, 0xbe, 0x1c, 0x62, 0x49, 0x0e, + 0x8c, 0xad, 0xb1, 0xe7, 0x8d, 0x1d, 0x36, 0x88, 0x31, 0xd4, 0x9d, 0x2b, 0x84, 0x71, 0x77, 0x39, + 0xc4, 0x26, 0x3e, 0x8f, 0x82, 0xdb, 0x3a, 0x48, 0x7d, 0x7b, 0x40, 0x5d, 0xd7, 0xe3, 0x94, 0xdb, + 0x9e, 0x1b, 0xea, 0xe8, 0xfb, 0xf2, 0x67, 0xb4, 0x3f, 0x66, 0xee, 0x7e, 0xf8, 0x86, 0x8e, 0xc7, + 0x2c, 0x18, 0x78, 0xbe, 0x44, 0xac, 0xa2, 0xc9, 0x3e, 0x74, 0x8e, 0xed, 0x19, 0x73, 0x59, 0x18, + 0x3e, 0xbb, 0x64, 0xa3, 0x2b, 0x93, 0x85, 0xbe, 0xe7, 0x86, 0x0c, 0xdb, 0x50, 0xa2, 0x8e, 0x3d, + 0x63, 0xdd, 0xdc, 0x4e, 0x6e, 0xaf, 0x6a, 0xaa, 0x01, 0xe9, 0xc3, 0xa6, 0xc9, 0xa8, 0x65, 0xaf, + 0xc5, 0x07, 0x8c, 0x5a, 0xf3, 0x08, 0x2f, 0x07, 0xe4, 0x15, 0x54, 0x5f, 0x32, 0x4e, 0x2d, 0xca, + 0x29, 0x3e, 0x84, 0xfa, 0x38, 0xf0, 0x47, 0x67, 0xd4, 0xb2, 0x02, 0x16, 0x86, 0x12, 0x78, 0xd3, + 0xac, 0x89, 0xb9, 0x23, 0x35, 0x25, 0x20, 0x97, 0x9c, 0xfb, 0x31, 0x24, 0xaf, 0x20, 0x62, 0x4e, + 0x43, 0x88, 0x03, 0xc5, 0x13, 0xcf, 0x62, 0x02, 0x1a, 0xd0, 0x0b, 0xbe, 0xcc, 0x26, 0xe6, 0x22, + 0xb6, 0x47, 0x50, 0x9d, 0xe8, 0xe4, 0x92, 0xa9, 0x76, 0xb0, 0xd1, 0x57, 0xc7, 0x10, 0x69, 0x32, + 0x63, 0x80, 0xd0, 0x1f, 0x72, 0xca, 0x59, 0xb7, 0x20, 0x89, 0xd4, 0x80, 0xfc, 0x91, 0x83, 0xca, + 0x33, 0x67, 0x1a, 0x72, 0x16, 0xe0, 0x00, 0x4a, 0xae, 0x67, 0x31, 0x91, 0xaa, 0xb0, 0x57, 0x3b, + 0xd8, 0xd2, 0x5c, 0x3a, 0xdc, 0x17, 0xaa, 0xc2, 0xa1, 0xcb, 0x83, 0xb9, 0xa9, 0x70, 0xb8, 0x09, + 0x65, 0x87, 0x51, 0x8b, 0x05, 0x7a, 0x1f, 0x7a, 0x64, 0x0c, 0x01, 0x16, 0x60, 0x6c, 0x41, 0xe1, + 0x8a, 0xcd, 0xb5, 0x7e, 0xf1, 0x89, 0x0f, 0xa1, 0x34, 0xa3, 0xce, 0x94, 0x69, 
0xd1, 0x35, 0x9d, + 0x48, 0xac, 0x31, 0x55, 0xe4, 0x30, 0xff, 0x49, 0x8e, 0x7c, 0x01, 0xb5, 0xaf, 0x3c, 0xdb, 0x35, + 0xd9, 0xcf, 0x53, 0x16, 0x72, 0x6c, 0x42, 0xde, 0xb6, 0x34, 0x4d, 0xde, 0xb6, 0xf0, 0x01, 0x14, + 0x85, 0x8c, 0x75, 0x24, 0x32, 0x40, 0xee, 0x43, 0xfd, 0x98, 0xd1, 0x19, 0xcb, 0x20, 0x20, 0x03, + 0xa8, 0x4b, 0x74, 0x74, 0xc2, 0x11, 0x61, 0x2e, 0x8b, 0xf0, 0x33, 0xd8, 0xd0, 0xc5, 0x88, 0xd7, + 0xec, 0x41, 0x65, 0xa4, 0xa6, 0xf4, 0xb2, 0x66, 0xba, 0x6a, 0x66, 0x14, 0x26, 0x07, 0x50, 0x7d, + 0xee, 0x8d, 0xa6, 0x13, 0xe6, 0xae, 0x6e, 0x65, 0x13, 0xca, 0x17, 0x36, 0x73, 0x2c, 0x75, 0x21, + 0xea, 0xa6, 0x1e, 0x91, 0x6d, 0x80, 0x17, 0x8c, 0x67, 0xe9, 0xdf, 0x85, 0x9a, 0x8c, 0x6a, 0x29, + 0x0b, 0x92, 0x5c, 0x8a, 0xe4, 0x43, 0x80, 0xd3, 0x4c, 0x92, 0xcc, 0xd4, 0x0f, 0xa0, 0xf1, 0x9c, + 0x39, 0x8c, 0x67, 0x56, 0xef, 0x08, 0x5a, 0x4f, 0xa7, 0xce, 0xd5, 0x97, 0x62, 0xb7, 0x11, 0x66, + 0x1f, 0xaa, 0x81, 0xfa, 0x8c, 0x2e, 0xd1, 0x2d, 0x5d, 0x8e, 0x85, 0x02, 0x33, 0x86, 0x90, 0xf7, + 0xe0, 0x56, 0x82, 0x62, 0xf1, 0xce, 0x46, 0xde, 0xd4, 0xe5, 0x32, 0x55, 0xc9, 0x54, 0x03, 0x32, + 0x54, 0xd0, 0xb4, 0xa4, 0xc7, 0x2b, 0xe9, 0xda, 0x3a, 0x5d, 0x0a, 0x97, 0xc8, 0xd8, 0x03, 0x4c, + 0xd2, 0xbc, 0x33, 0xe5, 0x37, 0x80, 0xa7, 0x8c, 0xc7, 0x2f, 0x29, 0xa3, 0x7e, 0xff, 0xe5, 0x0d, + 0x92, 0xff, 0x43, 0x47, 0xa5, 0xbe, 0x86, 0x95, 0x7c, 0x0c, 0x8d, 0x53, 0x46, 0x83, 0xd1, 0x65, + 0x04, 0xd8, 0x85, 0x66, 0x28, 0x27, 0xce, 0xf4, 0x5e, 0xf4, 0x21, 0x37, 0xc2, 0x24, 0x8c, 0x7c, + 0x04, 0xcd, 0x68, 0x9d, 0xde, 0xdb, 0xff, 0xa0, 0x11, 0x2f, 0x0c, 0xa7, 0x4e, 0xb4, 0xae, 0x1e, + 0xad, 0x13, 0x73, 0xe4, 0x11, 0x6c, 0xbc, 0xa4, 0xbe, 0x6f, 0xbb, 0xe3, 0x78, 0x5d, 0x17, 0x2a, + 0x13, 0x35, 0xa5, 0x57, 0x44, 0x43, 0xf2, 0x57, 0x0e, 0x4a, 0xc3, 0x99, 0xb8, 0xc6, 0xbb, 0x50, + 0xe4, 0x73, 0x5f, 0x3d, 0x98, 0x66, 0x7c, 0xd4, 0x32, 0xd6, 0xff, 0x76, 0xee, 0x33, 0x53, 0x86, + 0x71, 0x0f, 0x8a, 0x89, 0xf2, 0xb4, 0xfb, 0xca, 0xdd, 0xfb, 0x91, 0xf5, 0xf7, 0x8f, 0xdc, 0xb9, + 0x29, 0x11, 0xe4, 
0x27, 0x28, 0x8a, 0x75, 0x58, 0x83, 0xca, 0x6b, 0xf7, 0xca, 0xf5, 0xde, 0xb8, + 0xad, 0x1b, 0x58, 0x85, 0xa2, 0xb0, 0x81, 0x56, 0x0e, 0x6f, 0x42, 0x49, 0x3e, 0xe8, 0x56, 0x1e, + 0x2b, 0x50, 0x38, 0x65, 0xbc, 0x55, 0x40, 0x80, 0xb2, 0x2a, 0x69, 0xab, 0x88, 0x0d, 0xb8, 0x19, + 0xdf, 0xa7, 0x56, 0x09, 0x9b, 0x00, 0x8b, 0xc3, 0x6e, 0x95, 0xc9, 0x13, 0x68, 0x7c, 0x47, 0x79, + 0xa2, 0x36, 0x04, 0x4a, 0x4c, 0x88, 0xd5, 0x4f, 0xb7, 0x9e, 0xdc, 0x80, 0xa9, 0x42, 0xb2, 0x34, + 0x8c, 0x07, 0xf6, 0x28, 0x4c, 0x95, 0x46, 0x4d, 0xc5, 0xa5, 0x51, 0xc3, 0x83, 0x3f, 0x01, 0x4a, + 0x32, 0x3b, 0x52, 0x68, 0xa4, 0xda, 0x0e, 0x6e, 0xae, 0x6c, 0x7b, 0x28, 0x3a, 0x9e, 0xb1, 0xad, + 0x93, 0xae, 0x6d, 0x52, 0xc4, 0xf8, 0xed, 0xef, 0x7f, 0x7e, 0xcf, 0xb7, 0x11, 0x07, 0xb3, 0x0f, + 0x06, 0x8e, 0x86, 0x9c, 0x8d, 0x24, 0xa3, 0x05, 0xcd, 0x74, 0xab, 0xca, 0xcc, 0x71, 0x4f, 0xe7, + 0x58, 0xdf, 0xd9, 0xc8, 0x5d, 0x99, 0xa4, 0x83, 0xb7, 0x45, 0x92, 0x20, 0xc2, 0xe8, 0x2c, 0x43, + 0xdd, 0x8e, 0xb2, 0xb8, 0x6f, 0x27, 0x6d, 0x32, 0x62, 0x6c, 0x49, 0x46, 0xc0, 0xaa, 0x60, 0x14, + 0xd6, 0x89, 0xa6, 0x3a, 0x44, 0x44, 0x0d, 0x4f, 0x18, 0xbb, 0x91, 0x41, 0x4d, 0xee, 0x4b, 0x96, + 0xae, 0xd1, 0x12, 0x2c, 0xda, 0x46, 0x07, 0xbf, 0xda, 0xd6, 0xdb, 0x43, 0x69, 0xc7, 0x78, 0xb2, + 0x68, 0x5d, 0x59, 0xea, 0x36, 0x97, 0xdc, 0x38, 0x12, 0x78, 0x5b, 0x52, 0x37, 0xb0, 0x96, 0xa0, + 0xc6, 0x13, 0x7d, 0xbd, 0x30, 0xda, 0x53, 0xb2, 0x7b, 0x64, 0xaa, 0xec, 0x4a, 0x2a, 0xec, 0xad, + 0xa8, 0xc4, 0x57, 0x50, 0x3d, 0x75, 0xa9, 0x1f, 0x5e, 0x7a, 0xfc, 0x1d, 0x02, 0xd7, 0xb3, 0xb6, + 0x25, 0x6b, 0x13, 0xeb, 0x82, 0x35, 0x8c, 0x58, 0xbe, 0x4f, 0x5c, 0x70, 0xbc, 0xa3, 0x55, 0x2e, + 0xbb, 0xb0, 0xd1, 0x5d, 0x0d, 0xe8, 0x6d, 0x6b, 0xad, 0x46, 0x43, 0xb0, 0x5a, 0xba, 0x1b, 0x85, + 0x87, 0xb9, 0x1e, 0xfe, 0x98, 0x7c, 0x2b, 0x98, 0x64, 0x48, 0x59, 0xa9, 0xb1, 0xb5, 0x26, 0x92, + 0x26, 0xef, 0xad, 0x92, 0x7f, 0x0d, 0x85, 0x17, 0x8c, 0x63, 0x64, 0x10, 0x8b, 0x96, 0x66, 0x60, + 0x72, 0x4a, 0xf3, 0xdc, 0x93, 0x3c, 0x77, 0xb0, 0x93, 
0xe2, 0x11, 0x25, 0xfd, 0xbc, 0xd7, 0x7b, + 0x8b, 0xa6, 0x7c, 0xf9, 0xb8, 0xda, 0x58, 0x32, 0x6b, 0xb9, 0x23, 0x09, 0x0d, 0x63, 0x3d, 0xa1, + 0x10, 0xf8, 0x3a, 0x32, 0x11, 0x5c, 0xdb, 0x40, 0x32, 0x99, 0xb5, 0xd4, 0x5e, 0x86, 0xd4, 0x63, + 0x28, 0x2b, 0x37, 0x8e, 0x69, 0x53, 0xa6, 0x6e, 0x74, 0x96, 0x66, 0x75, 0x01, 0x3a, 0x92, 0x75, + 0x83, 0x80, 0x3c, 0x7b, 0x19, 0x13, 0x22, 0x4f, 0xa0, 0xa2, 0x4d, 0xfa, 0xda, 0xeb, 0xbe, 0x64, + 0xe6, 0xe9, 0xeb, 0xae, 0x7d, 0x1c, 0x3f, 0x85, 0x92, 0xb4, 0xc3, 0x4c, 0xb6, 0x48, 0x74, 0xca, + 0x34, 0xc9, 0x8d, 0xc7, 0x39, 0x29, 0x45, 0x59, 0xde, 0xf5, 0x52, 0xd2, 0xe6, 0xb9, 0x24, 0x45, + 0x05, 0x9f, 0x92, 0x1f, 0x76, 0xc6, 0x36, 0xbf, 0x9c, 0x9e, 0xf7, 0x47, 0xde, 0x64, 0x30, 0xf1, + 0xc2, 0xe9, 0x15, 0x1d, 0x9c, 0x3b, 0x34, 0xe4, 0xf1, 0x1f, 0x84, 0xf3, 0xb2, 0xfc, 0x7a, 0xf2, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xd3, 0x19, 0x60, 0x72, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IndexClient is the client API for Index service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type IndexClient interface { + LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) + ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) + Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) + Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) + BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) + BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) + Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) + Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) +} + +type indexClient struct { + cc *grpc.ClientConn +} + +func NewIndexClient(cc *grpc.ClientConn) IndexClient { + return &indexClient{cc} +} + +func (c *indexClient) LivenessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessCheckResponse, error) { + out := new(LivenessCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/LivenessCheck", in, out, 
opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) ReadinessCheck(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessCheckResponse, error) { + out := new(ReadinessCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/ReadinessCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Node(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeResponse, error) { + out := new(NodeResponse) + err := c.cc.Invoke(ctx, "/index.Index/Node", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Join", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Cluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterResponse, error) { + out := new(ClusterResponse) + err := c.cc.Invoke(ctx, "/index.Index/Cluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Leave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { + out := new(BulkIndexResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { + out := new(BulkDeleteResponse) + err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Mapping(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MappingResponse, error) { + out := new(MappingResponse) + err := c.cc.Invoke(ctx, "/index.Index/Mapping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *indexClient) Watch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/Watch", opts...) 
+ if err != nil { + return nil, err + } + x := &indexWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Index_WatchClient interface { + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type indexWatchClient struct { + grpc.ClientStream +} + +func (x *indexWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *indexClient) Metrics(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := c.cc.Invoke(ctx, "/index.Index/Metrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IndexServer is the server API for Index service. +type IndexServer interface { + LivenessCheck(context.Context, *empty.Empty) (*LivenessCheckResponse, error) + ReadinessCheck(context.Context, *empty.Empty) (*ReadinessCheckResponse, error) + Node(context.Context, *empty.Empty) (*NodeResponse, error) + Join(context.Context, *JoinRequest) (*empty.Empty, error) + Cluster(context.Context, *empty.Empty) (*ClusterResponse, error) + Leave(context.Context, *LeaveRequest) (*empty.Empty, error) + Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) + BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) + BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + Set(context.Context, *SetRequest) (*empty.Empty, error) + Delete(context.Context, *DeleteRequest) (*empty.Empty, error) + Search(context.Context, *SearchRequest) (*SearchResponse, error) + Mapping(context.Context, *empty.Empty) (*MappingResponse, error) + Watch(*empty.Empty, Index_WatchServer) error + Metrics(context.Context, *empty.Empty) (*MetricsResponse, error) 
+} + +// UnimplementedIndexServer can be embedded to have forward compatible implementations. +type UnimplementedIndexServer struct { +} + +func (*UnimplementedIndexServer) LivenessCheck(ctx context.Context, req *empty.Empty) (*LivenessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LivenessCheck not implemented") +} +func (*UnimplementedIndexServer) ReadinessCheck(ctx context.Context, req *empty.Empty) (*ReadinessCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadinessCheck not implemented") +} +func (*UnimplementedIndexServer) Node(ctx context.Context, req *empty.Empty) (*NodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Node not implemented") +} +func (*UnimplementedIndexServer) Join(ctx context.Context, req *JoinRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Join not implemented") +} +func (*UnimplementedIndexServer) Cluster(ctx context.Context, req *empty.Empty) (*ClusterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cluster not implemented") +} +func (*UnimplementedIndexServer) Leave(ctx context.Context, req *LeaveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leave not implemented") +} +func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") +} +func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") +} +func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") +} +func (*UnimplementedIndexServer) Get(ctx context.Context, req 
*GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedIndexServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (*UnimplementedIndexServer) Mapping(ctx context.Context, req *empty.Empty) (*MappingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mapping not implemented") +} +func (*UnimplementedIndexServer) Watch(req *empty.Empty, srv Index_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (*UnimplementedIndexServer) Metrics(ctx context.Context, req *empty.Empty) (*MetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") +} + +func RegisterIndexServer(s *grpc.Server, srv IndexServer) { + s.RegisterService(&_Index_serviceDesc, srv) +} + +func _Index_LivenessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).LivenessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/LivenessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).LivenessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Index_ReadinessCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).ReadinessCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/ReadinessCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).ReadinessCheck(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Node_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Node(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Node", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Node(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Cluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Cluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Cluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Cluster(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Snapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Snapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkIndex", + 
} + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).BulkDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/BulkDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Mapping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Mapping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Mapping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Mapping(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Index_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IndexServer).Watch(m, 
&indexWatchServer{stream}) +} + +type Index_WatchServer interface { + Send(*WatchResponse) error + grpc.ServerStream +} + +type indexWatchServer struct { + grpc.ServerStream +} + +func (x *indexWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Index_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(empty.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/index.Index/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServer).Metrics(ctx, req.(*empty.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Index_serviceDesc = grpc.ServiceDesc{ + ServiceName: "index.Index", + HandlerType: (*IndexServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LivenessCheck", + Handler: _Index_LivenessCheck_Handler, + }, + { + MethodName: "ReadinessCheck", + Handler: _Index_ReadinessCheck_Handler, + }, + { + MethodName: "Node", + Handler: _Index_Node_Handler, + }, + { + MethodName: "Join", + Handler: _Index_Join_Handler, + }, + { + MethodName: "Cluster", + Handler: _Index_Cluster_Handler, + }, + { + MethodName: "Leave", + Handler: _Index_Leave_Handler, + }, + { + MethodName: "Snapshot", + Handler: _Index_Snapshot_Handler, + }, + { + MethodName: "BulkIndex", + Handler: _Index_BulkIndex_Handler, + }, + { + MethodName: "BulkDelete", + Handler: _Index_BulkDelete_Handler, + }, + { + MethodName: "Get", + Handler: _Index_Get_Handler, + }, + { + MethodName: "Set", + Handler: _Index_Set_Handler, + }, + { + MethodName: "Delete", + Handler: _Index_Delete_Handler, + }, + { + MethodName: "Search", + Handler: _Index_Search_Handler, + }, + { + MethodName: "Mapping", + Handler: _Index_Mapping_Handler, + }, + { + 
MethodName: "Metrics", + Handler: _Index_Metrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Index_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "protobuf/index.proto", +} diff --git a/protobuf/index.pb.gw.go b/protobuf/index.pb.gw.go new file mode 100644 index 0000000..810b9ed --- /dev/null +++ b/protobuf/index.pb.gw.go @@ -0,0 +1,1276 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: protobuf/index.proto + +/* +Package protobuf is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package protobuf + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.LivenessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_LivenessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.LivenessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func 
request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.ReadinessCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_ReadinessCheck_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.ReadinessCheck(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Node(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Node_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Node(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Join(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Join_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq JoinRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Node); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Join(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata 
runtime.ServerMetadata + + msg, err := client.Cluster(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Cluster_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Cluster(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Leave(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Leave_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Leave(ctx, 
&protoReq) + return msg, metadata, err + +} + +func request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Snapshot(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkIndexRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if 
err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkIndex(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BulkDeleteRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.BulkDelete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = 
err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Get(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = 
runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Set_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SetRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Set(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + 
msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Delete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SearchRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Search(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Mapping(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Mapping_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Mapping(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := client.Metrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Index_Metrics_0(ctx context.Context, marshaler runtime.Marshaler, server IndexServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq empty.Empty + var metadata runtime.ServerMetadata + + msg, err := server.Metrics(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterIndexHandlerServer registers the http handlers for service Index to "mux". +// UnaryRPC :call IndexServer directly. 
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +func RegisterIndexHandlerServer(ctx context.Context, mux *runtime.ServeMux, server IndexServer) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_LivenessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_ReadinessCheck_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Node_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Join_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Cluster_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Leave_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Snapshot_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkIndex_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_BulkDelete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Get_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Set_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Delete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Search_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Mapping_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Index_Metrics_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterIndexHandler(ctx, mux, conn) +} + +// RegisterIndexHandler registers the http handlers for service Index to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) +} + +// RegisterIndexHandlerClient registers the http handlers for service Index +// to "mux". 
The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "IndexClient" to call the correct interceptors. +func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { + + mux.Handle("GET", pattern_Index_LivenessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_LivenessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_LivenessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_ReadinessCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_ReadinessCheck_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_ReadinessCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Node_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Node_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Node_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_Join_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Join_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Join_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Cluster_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Cluster_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Cluster_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Leave_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Leave_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Leave_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_Index_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Set_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Index_Mapping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Mapping_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Mapping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Index_Metrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Index_Metrics_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Index_Metrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Index_LivenessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "liveness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_ReadinessCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "readiness_check"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Node_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Join_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Cluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "cluster"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Leave_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "cluster", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, 
[]string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Mapping_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "mapping"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Index_Metrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Index_LivenessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_ReadinessCheck_0 = runtime.ForwardResponseMessage + + forward_Index_Node_0 = runtime.ForwardResponseMessage + + forward_Index_Join_0 = runtime.ForwardResponseMessage + + forward_Index_Cluster_0 = runtime.ForwardResponseMessage + + forward_Index_Leave_0 = runtime.ForwardResponseMessage + + forward_Index_Snapshot_0 = runtime.ForwardResponseMessage + + forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage + + forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage + + forward_Index_Get_0 = runtime.ForwardResponseMessage + + forward_Index_Set_0 = runtime.ForwardResponseMessage + + forward_Index_Delete_0 = runtime.ForwardResponseMessage + + forward_Index_Search_0 = runtime.ForwardResponseMessage + + forward_Index_Mapping_0 = runtime.ForwardResponseMessage + + forward_Index_Metrics_0 = runtime.ForwardResponseMessage +) diff --git a/protobuf/index.proto b/protobuf/index.proto new file mode 100644 index 0000000..9eb168b --- /dev/null +++ b/protobuf/index.proto @@ -0,0 +1,223 @@ +syntax = "proto3"; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/api/annotations.proto"; +import "protoc-gen-swagger/options/annotations.proto"; + +package index; + +option go_package = "github.com/mosuka/blast/protobuf"; + +service Index { + rpc LivenessCheck (google.protobuf.Empty) returns (LivenessCheckResponse) { 
+ option (google.api.http) = { + get: "/v1/liveness_check" + }; + } + + rpc ReadinessCheck (google.protobuf.Empty) returns (ReadinessCheckResponse) { + option (google.api.http) = { + get: "/v1/readiness_check" + }; + } + + rpc Node (google.protobuf.Empty) returns (NodeResponse) { + option (google.api.http) = { + get: "/v1/node" + }; + } + rpc Join (JoinRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/cluster/{id}" + body: "node" + }; + } + rpc Cluster (google.protobuf.Empty) returns (ClusterResponse) { + option (google.api.http) = { + get: "/v1/cluster" + }; + } + rpc Leave (LeaveRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/cluster/{id}" + }; + } + + rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/snapshot" + }; + } + + rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { + option (google.api.http) = { + put: "/v1/documents" + body: "*" + }; + } + rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { + option (google.api.http) = { + delete: "/v1/documents" + body: "*" + }; + } + rpc Get (GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/v1/documents/{id=**}" + }; + } + rpc Set (SetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + put: "/v1/documents/{id=**}" + body: "*" + }; + } + rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/documents/{id=**}" + }; + } + rpc Search (SearchRequest) returns (SearchResponse) { + option (google.api.http) = { + post: "/v1/search" + body: "*" + }; + } + + rpc Mapping (google.protobuf.Empty) returns (MappingResponse) { + option (google.api.http) = { + get: "/v1/mapping" + }; + } + + rpc Watch (google.protobuf.Empty) returns (stream WatchResponse) {} + + rpc Metrics (google.protobuf.Empty) returns (MetricsResponse) { + option (google.api.http) = { + get: 
"/v1/metrics" + }; + } +} + +message LivenessCheckResponse { + bool alive = 1; +} + +message ReadinessCheckResponse { + bool ready = 1; +} + +message Metadata { + string grpc_address = 1; + string http_address = 2; +} + +message Node { + string raft_address = 1; + Metadata metadata = 2; + string state = 3; +} + +message Cluster { + map nodes = 1; + string leader = 2; +} + +message JoinRequest { + string id = 1; + Node node = 2; +} + +message LeaveRequest { + string id = 1; +} + +message NodeResponse { + Node node = 1; +} + +message ClusterResponse { + Cluster cluster = 1; +} + +message Document { + string id = 1; + bytes fields = 2; +} + +message GetRequest { + string id = 1; +} + +message GetResponse { + bytes fields = 1; +} + +message SetRequest { + string id = 1; + bytes fields = 2; +} + +message DeleteRequest { + string id = 1; +} + +message BulkIndexRequest { + repeated SetRequest requests = 1; +} + +message BulkIndexResponse { + int32 count = 1; +} + +message BulkDeleteRequest { + repeated DeleteRequest requests = 1; +} + +message BulkDeleteResponse { + int32 count = 1; +} + +message SetMetadataRequest { + string id = 1; + Metadata metadata = 2; +} + +message DeleteMetadataRequest { + string id = 1; +} + +message SearchRequest { + bytes search_request = 1; +} + +message SearchResponse { + bytes search_result = 1; +} + +message MappingResponse { + bytes mapping = 1; +} + +message Event { + enum Type { + Unknown = 0; + Join = 1; + Leave = 2; + Set = 3; + Delete = 4; + BulkIndex = 5; + BulkDelete = 6; + } + Type type = 1; + google.protobuf.Any data = 2; +} + +message WatchResponse { + Event event = 1; +} + +message MetricsResponse { + bytes metrics = 1; +} diff --git a/protobuf/index/index.go b/protobuf/index/index.go deleted file mode 100644 index 31a3023..0000000 --- a/protobuf/index/index.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "encoding/json" - "errors" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/protobuf" -) - -func MarshalDocument(doc *Document) ([]byte, error) { - if doc == nil { - return nil, errors.New("nil") - } - - fieldsIntr, err := protobuf.MarshalAny(doc.Fields) - if err != nil { - return nil, err - } - - docMap := map[string]interface{}{ - "id": doc.Id, - "fields": *fieldsIntr.(*map[string]interface{}), - } - - docBytes, err := json.Marshal(docMap) - if err != nil { - return nil, err - } - - return docBytes, nil -} - -func UnmarshalDocument(data []byte, doc *Document) error { - var err error - - if data == nil || len(data) <= 0 || doc == nil { - return nil - } - - var docMap map[string]interface{} - err = json.Unmarshal(data, &docMap) - if err != nil { - return err - } - - if id, ok := docMap["id"].(string); ok { - doc.Id = id - } - - if fieldsMap, ok := docMap["fields"].(map[string]interface{}); ok { - fieldsAny := &any.Any{} - err = protobuf.UnmarshalAny(fieldsMap, fieldsAny) - if err != nil { - return err - } - doc.Fields = fieldsAny - } - - return nil -} diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go deleted file mode 100644 index b60dbee..0000000 --- a/protobuf/index/index.pb.go +++ /dev/null @@ -1,2051 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: protobuf/index/index.proto - -package index - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY 
NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1, 0} -} - -type Node_State int32 - -const ( - Node_UNKNOWN Node_State = 0 - Node_FOLLOWER Node_State = 1 - Node_CANDIDATE Node_State = 2 - Node_LEADER Node_State = 3 - Node_SHUTDOWN Node_State = 4 -) - -var Node_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "FOLLOWER", - 2: "CANDIDATE", - 3: "LEADER", - 4: "SHUTDOWN", -} - -var Node_State_value = map[string]int32{ - "UNKNOWN": 0, - "FOLLOWER": 1, - "CANDIDATE": 2, - "LEADER": 3, - "SHUTDOWN": 4, -} - -func (x Node_State) String() string { - return proto.EnumName(Node_State_name, int32(x)) -} - -func (Node_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{3, 0} -} - -type ClusterWatchResponse_Event int32 - -const ( - ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 - ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 - ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 - ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 -) - -var ClusterWatchResponse_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "JOIN", - 2: "LEAVE", - 3: "UPDATE", -} - 
-var ClusterWatchResponse_Event_value = map[string]int32{ - "UNKNOWN": 0, - "JOIN": 1, - "LEAVE": 2, - "UPDATE": 3, -} - -func (x ClusterWatchResponse_Event) String() string { - return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) -} - -func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9, 0} -} - -type Proposal_Event int32 - -const ( - Proposal_UNKNOWN Proposal_Event = 0 - Proposal_SET_NODE Proposal_Event = 1 - Proposal_DELETE_NODE Proposal_Event = 2 - Proposal_INDEX Proposal_Event = 3 - Proposal_DELETE Proposal_Event = 4 - Proposal_BULK_INDEX Proposal_Event = 5 - Proposal_BULK_DELETE Proposal_Event = 6 -) - -var Proposal_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET_NODE", - 2: "DELETE_NODE", - 3: "INDEX", - 4: "DELETE", - 5: "BULK_INDEX", - 6: "BULK_DELETE", -} - -var Proposal_Event_value = map[string]int32{ - "UNKNOWN": 0, - "SET_NODE": 1, - "DELETE_NODE": 2, - "INDEX": 3, - "DELETE": 4, - "BULK_INDEX": 5, - "BULK_DELETE": 6, -} - -func (x Proposal_Event) String() string { - return proto.EnumName(Proposal_Event_name, int32(x)) -} - -func (Proposal_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{24, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return 
xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type Metadata struct { - GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` - HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{2} -} - -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetGrpcAddress() string { - if m != nil { - return m.GrpcAddress - } - return "" -} - -func (m *Metadata) GetGrpcGatewayAddress() string { - if m != nil { - return m.GrpcGatewayAddress - } - return "" -} 
- -func (m *Metadata) GetHttpAddress() string { - if m != nil { - return m.HttpAddress - } - return "" -} - -type Node struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - BindAddress string `protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=index.Node_State" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{3} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Node) GetBindAddress() string { - if m != nil { - return m.BindAddress - } - return "" -} - -func (m *Node) GetState() Node_State { - if m != nil { - return m.State - } - return Node_UNKNOWN -} - -func (m *Node) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Cluster struct { - Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{4} -} - -func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) -} -func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) -} -func (m *Cluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cluster.Merge(m, src) -} -func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) -} -func (m *Cluster) XXX_DiscardUnknown() { - xxx_messageInfo_Cluster.DiscardUnknown(m) -} - -var xxx_messageInfo_Cluster proto.InternalMessageInfo - -func (m *Cluster) GetNodes() map[string]*Node { - if m != nil { - return m.Nodes - } - return nil -} - -type NodeInfoResponse struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } -func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeInfoResponse) ProtoMessage() {} -func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{5} -} - -func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) -} -func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) -} -func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_NodeInfoResponse.Merge(m, src) -} -func (m *NodeInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeInfoResponse.Size(m) -} -func (m *NodeInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo - -func (m *NodeInfoResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterJoinRequest struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } -func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterJoinRequest) ProtoMessage() {} -func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{6} -} - -func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) -} -func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) -} -func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterJoinRequest.Merge(m, src) -} -func (m *ClusterJoinRequest) XXX_Size() int { - return xxx_messageInfo_ClusterJoinRequest.Size(m) -} -func (m *ClusterJoinRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo - -func (m *ClusterJoinRequest) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterLeaveRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } -func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterLeaveRequest) ProtoMessage() {} -func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{7} -} - -func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) -} -func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) -} -func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) -} -func (m *ClusterLeaveRequest) XXX_Size() int { - return xxx_messageInfo_ClusterLeaveRequest.Size(m) -} -func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo - -func (m *ClusterLeaveRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type ClusterInfoResponse struct { - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } -func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterInfoResponse) ProtoMessage() {} -func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{8} -} - -func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) -} -func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) -} -func (m *ClusterInfoResponse) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterInfoResponse.Merge(m, src) -} -func (m *ClusterInfoResponse) XXX_Size() int { - return xxx_messageInfo_ClusterInfoResponse.Size(m) -} -func (m *ClusterInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo - -func (m *ClusterInfoResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type ClusterWatchResponse struct { - Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.ClusterWatchResponse_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } -func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterWatchResponse) ProtoMessage() {} -func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9} -} - -func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) -} -func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) -} -func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWatchResponse.Merge(m, src) -} -func (m *ClusterWatchResponse) XXX_Size() int { - return xxx_messageInfo_ClusterWatchResponse.Size(m) -} -func (m *ClusterWatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo - -func (m 
*ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { - if m != nil { - return m.Event - } - return ClusterWatchResponse_UNKNOWN -} - -func (m *ClusterWatchResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ClusterWatchResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type GetRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{10} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type GetResponse struct { - // Document document = 1; - Fields *any.Any `protobuf:"bytes,1,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type IndexRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexRequest) Reset() { *m = IndexRequest{} } -func (m *IndexRequest) String() string { return proto.CompactTextString(m) } -func (*IndexRequest) ProtoMessage() {} -func (*IndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} -} - -func (m *IndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexRequest.Unmarshal(m, b) -} -func (m *IndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexRequest.Marshal(b, m, deterministic) -} -func (m *IndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexRequest.Merge(m, src) -} -func (m *IndexRequest) XXX_Size() int { - return xxx_messageInfo_IndexRequest.Size(m) -} -func (m *IndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_IndexRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexRequest 
proto.InternalMessageInfo - -func (m *IndexRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *IndexRequest) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type DeleteRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type Document struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Fields *any.Any `protobuf:"bytes,2,opt,name=fields,proto3" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return 
fileDescriptor_7b2daf652facb3ae, []int{14} -} - -func (m *Document) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Document.Unmarshal(m, b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) -} -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return xxx_messageInfo_Document.Size(m) -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} - -var xxx_messageInfo_Document proto.InternalMessageInfo - -func (m *Document) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Document) GetFields() *any.Any { - if m != nil { - return m.Fields - } - return nil -} - -type BulkIndexRequest struct { - Documents []*Document `protobuf:"bytes,1,rep,name=documents,proto3" json:"documents,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexRequest) Reset() { *m = BulkIndexRequest{} } -func (m *BulkIndexRequest) String() string { return proto.CompactTextString(m) } -func (*BulkIndexRequest) ProtoMessage() {} -func (*BulkIndexRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} -} - -func (m *BulkIndexRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexRequest.Unmarshal(m, b) -} -func (m *BulkIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexRequest.Marshal(b, m, deterministic) -} -func (m *BulkIndexRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexRequest.Merge(m, src) -} -func (m *BulkIndexRequest) XXX_Size() int { - return xxx_messageInfo_BulkIndexRequest.Size(m) -} -func (m *BulkIndexRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexRequest.DiscardUnknown(m) -} - -var 
xxx_messageInfo_BulkIndexRequest proto.InternalMessageInfo - -func (m *BulkIndexRequest) GetDocuments() []*Document { - if m != nil { - return m.Documents - } - return nil -} - -type BulkIndexResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkIndexResponse) Reset() { *m = BulkIndexResponse{} } -func (m *BulkIndexResponse) String() string { return proto.CompactTextString(m) } -func (*BulkIndexResponse) ProtoMessage() {} -func (*BulkIndexResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} -} - -func (m *BulkIndexResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkIndexResponse.Unmarshal(m, b) -} -func (m *BulkIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkIndexResponse.Marshal(b, m, deterministic) -} -func (m *BulkIndexResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkIndexResponse.Merge(m, src) -} -func (m *BulkIndexResponse) XXX_Size() int { - return xxx_messageInfo_BulkIndexResponse.Size(m) -} -func (m *BulkIndexResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkIndexResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkIndexResponse proto.InternalMessageInfo - -func (m *BulkIndexResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type BulkDeleteRequest struct { - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteRequest) Reset() { *m = BulkDeleteRequest{} } -func (m *BulkDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteRequest) ProtoMessage() {} -func (*BulkDeleteRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_7b2daf652facb3ae, []int{17} -} - -func (m *BulkDeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteRequest.Unmarshal(m, b) -} -func (m *BulkDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteRequest.Marshal(b, m, deterministic) -} -func (m *BulkDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteRequest.Merge(m, src) -} -func (m *BulkDeleteRequest) XXX_Size() int { - return xxx_messageInfo_BulkDeleteRequest.Size(m) -} -func (m *BulkDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BulkDeleteRequest proto.InternalMessageInfo - -func (m *BulkDeleteRequest) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -type BulkDeleteResponse struct { - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BulkDeleteResponse) Reset() { *m = BulkDeleteResponse{} } -func (m *BulkDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BulkDeleteResponse) ProtoMessage() {} -func (*BulkDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{18} -} - -func (m *BulkDeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BulkDeleteResponse.Unmarshal(m, b) -} -func (m *BulkDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BulkDeleteResponse.Marshal(b, m, deterministic) -} -func (m *BulkDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BulkDeleteResponse.Merge(m, src) -} -func (m *BulkDeleteResponse) XXX_Size() int { - return xxx_messageInfo_BulkDeleteResponse.Size(m) -} -func (m *BulkDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BulkDeleteResponse.DiscardUnknown(m) -} - 
-var xxx_messageInfo_BulkDeleteResponse proto.InternalMessageInfo - -func (m *BulkDeleteResponse) GetCount() int32 { - if m != nil { - return m.Count - } - return 0 -} - -type SearchRequest struct { - SearchRequest *any.Any `protobuf:"bytes,1,opt,name=search_request,json=searchRequest,proto3" json:"search_request,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } -func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{19} -} - -func (m *SearchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchRequest.Unmarshal(m, b) -} -func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) -} -func (m *SearchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchRequest.Merge(m, src) -} -func (m *SearchRequest) XXX_Size() int { - return xxx_messageInfo_SearchRequest.Size(m) -} -func (m *SearchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SearchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchRequest proto.InternalMessageInfo - -func (m *SearchRequest) GetSearchRequest() *any.Any { - if m != nil { - return m.SearchRequest - } - return nil -} - -type SearchResponse struct { - SearchResult *any.Any `protobuf:"bytes,1,opt,name=search_result,json=searchResult,proto3" json:"search_result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { - 
return fileDescriptor_7b2daf652facb3ae, []int{20} -} - -func (m *SearchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SearchResponse.Unmarshal(m, b) -} -func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) -} -func (m *SearchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SearchResponse.Merge(m, src) -} -func (m *SearchResponse) XXX_Size() int { - return xxx_messageInfo_SearchResponse.Size(m) -} -func (m *SearchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SearchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SearchResponse proto.InternalMessageInfo - -func (m *SearchResponse) GetSearchResult() *any.Any { - if m != nil { - return m.SearchResult - } - return nil -} - -type IndexConfig struct { - IndexMapping *any.Any `protobuf:"bytes,1,opt,name=index_mapping,json=indexMapping,proto3" json:"index_mapping,omitempty"` - IndexType string `protobuf:"bytes,2,opt,name=index_type,json=indexType,proto3" json:"index_type,omitempty"` - IndexStorageType string `protobuf:"bytes,3,opt,name=index_storage_type,json=indexStorageType,proto3" json:"index_storage_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexConfig) Reset() { *m = IndexConfig{} } -func (m *IndexConfig) String() string { return proto.CompactTextString(m) } -func (*IndexConfig) ProtoMessage() {} -func (*IndexConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{21} -} - -func (m *IndexConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexConfig.Unmarshal(m, b) -} -func (m *IndexConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexConfig.Marshal(b, m, deterministic) -} -func (m *IndexConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexConfig.Merge(m, src) -} -func (m 
*IndexConfig) XXX_Size() int { - return xxx_messageInfo_IndexConfig.Size(m) -} -func (m *IndexConfig) XXX_DiscardUnknown() { - xxx_messageInfo_IndexConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexConfig proto.InternalMessageInfo - -func (m *IndexConfig) GetIndexMapping() *any.Any { - if m != nil { - return m.IndexMapping - } - return nil -} - -func (m *IndexConfig) GetIndexType() string { - if m != nil { - return m.IndexType - } - return "" -} - -func (m *IndexConfig) GetIndexStorageType() string { - if m != nil { - return m.IndexStorageType - } - return "" -} - -type GetIndexConfigResponse struct { - IndexConfig *IndexConfig `protobuf:"bytes,1,opt,name=index_config,json=indexConfig,proto3" json:"index_config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} } -func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexConfigResponse) ProtoMessage() {} -func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{22} -} - -func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexConfigResponse.Unmarshal(m, b) -} -func (m *GetIndexConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexConfigResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexConfigResponse.Merge(m, src) -} -func (m *GetIndexConfigResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexConfigResponse.Size(m) -} -func (m *GetIndexConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexConfigResponse proto.InternalMessageInfo - -func (m *GetIndexConfigResponse) GetIndexConfig() *IndexConfig { - if m != 
nil { - return m.IndexConfig - } - return nil -} - -type GetIndexStatsResponse struct { - IndexStats *any.Any `protobuf:"bytes,1,opt,name=index_stats,json=indexStats,proto3" json:"index_stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } -func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } -func (*GetIndexStatsResponse) ProtoMessage() {} -func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{23} -} - -func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIndexStatsResponse.Unmarshal(m, b) -} -func (m *GetIndexStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIndexStatsResponse.Marshal(b, m, deterministic) -} -func (m *GetIndexStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIndexStatsResponse.Merge(m, src) -} -func (m *GetIndexStatsResponse) XXX_Size() int { - return xxx_messageInfo_GetIndexStatsResponse.Size(m) -} -func (m *GetIndexStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetIndexStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetIndexStatsResponse proto.InternalMessageInfo - -func (m *GetIndexStatsResponse) GetIndexStats() *any.Any { - if m != nil { - return m.IndexStats - } - return nil -} - -type Proposal struct { - Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.Proposal_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Document *Document `protobuf:"bytes,3,opt,name=document,proto3" json:"document,omitempty"` - Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` - Documents []*Document `protobuf:"bytes,5,rep,name=documents,proto3" json:"documents,omitempty"` - Ids []string 
`protobuf:"bytes,6,rep,name=ids,proto3" json:"ids,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Proposal) Reset() { *m = Proposal{} } -func (m *Proposal) String() string { return proto.CompactTextString(m) } -func (*Proposal) ProtoMessage() {} -func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{24} -} - -func (m *Proposal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Proposal.Unmarshal(m, b) -} -func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) -} -func (m *Proposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proposal.Merge(m, src) -} -func (m *Proposal) XXX_Size() int { - return xxx_messageInfo_Proposal.Size(m) -} -func (m *Proposal) XXX_DiscardUnknown() { - xxx_messageInfo_Proposal.DiscardUnknown(m) -} - -var xxx_messageInfo_Proposal proto.InternalMessageInfo - -func (m *Proposal) GetEvent() Proposal_Event { - if m != nil { - return m.Event - } - return Proposal_UNKNOWN -} - -func (m *Proposal) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *Proposal) GetDocument() *Document { - if m != nil { - return m.Document - } - return nil -} - -func (m *Proposal) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Proposal) GetDocuments() []*Document { - if m != nil { - return m.Documents - } - return nil -} - -func (m *Proposal) GetIds() []string { - if m != nil { - return m.Ids - } - return nil -} - -func init() { - proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) - 
proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) - proto.RegisterEnum("index.Proposal_Event", Proposal_Event_name, Proposal_Event_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") - proto.RegisterType((*Metadata)(nil), "index.Metadata") - proto.RegisterType((*Node)(nil), "index.Node") - proto.RegisterType((*Cluster)(nil), "index.Cluster") - proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") - proto.RegisterType((*NodeInfoResponse)(nil), "index.NodeInfoResponse") - proto.RegisterType((*ClusterJoinRequest)(nil), "index.ClusterJoinRequest") - proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") - proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") - proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") - proto.RegisterType((*GetRequest)(nil), "index.GetRequest") - proto.RegisterType((*GetResponse)(nil), "index.GetResponse") - proto.RegisterType((*IndexRequest)(nil), "index.IndexRequest") - proto.RegisterType((*DeleteRequest)(nil), "index.DeleteRequest") - proto.RegisterType((*Document)(nil), "index.Document") - proto.RegisterType((*BulkIndexRequest)(nil), "index.BulkIndexRequest") - proto.RegisterType((*BulkIndexResponse)(nil), "index.BulkIndexResponse") - proto.RegisterType((*BulkDeleteRequest)(nil), "index.BulkDeleteRequest") - proto.RegisterType((*BulkDeleteResponse)(nil), "index.BulkDeleteResponse") - proto.RegisterType((*SearchRequest)(nil), "index.SearchRequest") - proto.RegisterType((*SearchResponse)(nil), "index.SearchResponse") - proto.RegisterType((*IndexConfig)(nil), "index.IndexConfig") - proto.RegisterType((*GetIndexConfigResponse)(nil), "index.GetIndexConfigResponse") - proto.RegisterType((*GetIndexStatsResponse)(nil), "index.GetIndexStatsResponse") - 
proto.RegisterType((*Proposal)(nil), "index.Proposal") -} - -func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } - -var fileDescriptor_7b2daf652facb3ae = []byte{ - // 1454 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xdf, 0x72, 0xda, 0xc6, - 0x17, 0xb6, 0x00, 0x61, 0x7c, 0x04, 0x58, 0xd9, 0x60, 0x3b, 0x51, 0xec, 0x5f, 0xe2, 0xfd, 0x35, - 0xad, 0x4b, 0x5a, 0x48, 0x9d, 0x66, 0xda, 0x38, 0xed, 0x74, 0xb0, 0x51, 0x6d, 0x62, 0x02, 0x19, - 0x81, 0x93, 0x26, 0x33, 0x1d, 0x46, 0xc0, 0x1a, 0x54, 0x63, 0x89, 0x22, 0xe1, 0x96, 0xe9, 0xf4, - 0xa2, 0x79, 0x85, 0x4e, 0xa7, 0x6f, 0xd2, 0x17, 0xe8, 0x13, 0x74, 0x7a, 0x9b, 0xcb, 0x3e, 0x48, - 0x67, 0xff, 0x48, 0x48, 0xd8, 0x90, 0x76, 0x7a, 0xe3, 0x61, 0xcf, 0xf9, 0xce, 0xb7, 0xdf, 0x39, - 0x3a, 0xbb, 0x67, 0x0d, 0xda, 0x70, 0xe4, 0x78, 0x4e, 0x7b, 0x7c, 0x5a, 0xb4, 0xec, 0x2e, 0xf9, - 0x9e, 0xff, 0x2d, 0x30, 0x23, 0x92, 0xd9, 0x42, 0xbb, 0xd9, 0x73, 0x9c, 0xde, 0x80, 0x14, 0x03, - 0xa4, 0x69, 0x4f, 0x38, 0x42, 0xbb, 0x35, 0xeb, 0x22, 0xe7, 0x43, 0xcf, 0x77, 0x6e, 0x0a, 0xa7, - 0x39, 0xb4, 0x8a, 0xa6, 0x6d, 0x3b, 0x9e, 0xe9, 0x59, 0x8e, 0xed, 0x72, 0x2f, 0xfe, 0x55, 0x82, - 0xf5, 0x9a, 0xd3, 0x25, 0x47, 0xc4, 0x1c, 0x78, 0xfd, 0x83, 0x3e, 0xe9, 0x9c, 0x19, 0xe4, 0xdb, - 0x31, 0x71, 0x3d, 0xf4, 0x08, 0xe4, 0xe1, 0xc8, 0x69, 0x93, 0x1b, 0xd2, 0x1d, 0x69, 0x27, 0xbb, - 0xfb, 0xff, 0x02, 0x17, 0x75, 0x35, 0xba, 0xf0, 0x8c, 0x42, 0x0d, 0x1e, 0x81, 0xf7, 0x41, 0x66, - 0x6b, 0xa4, 0xc0, 0xf2, 0x49, 0xed, 0xb8, 0x56, 0x7f, 0x51, 0x53, 0x97, 0xd0, 0x2a, 0x28, 0x47, - 0x7a, 0xa9, 0xda, 0x3c, 0xaa, 0xd4, 0xf4, 0x46, 0x43, 0x95, 0x50, 0x1a, 0x52, 0xd5, 0xca, 0x73, - 0x9d, 0xad, 0x62, 0x28, 0x03, 0x2b, 0x86, 0x5e, 0x2a, 0x73, 0x67, 0x1c, 0xff, 0x26, 0xc1, 0xc6, - 0xa5, 0xbd, 0xdc, 0xa1, 0x63, 0xbb, 0x04, 0xed, 0x81, 0xec, 0x7a, 0xa6, 0xe7, 0x4b, 0x7b, 0x67, - 0x9e, 0x34, 0x0e, 0x2f, 0x34, 0x28, 0xd6, 0xe0, 0x21, 0xb8, 0x05, 0x32, 
0x5b, 0x47, 0xb5, 0x29, - 0xb0, 0xcc, 0xb5, 0xbd, 0x54, 0x25, 0xaa, 0xe4, 0xa4, 0xe6, 0x2f, 0x63, 0x68, 0x05, 0xe4, 0x12, - 0xd5, 0xa9, 0xc6, 0x51, 0x0a, 0x12, 0x65, 0xbd, 0x54, 0x56, 0x13, 0xd4, 0x48, 0xd5, 0xbe, 0x54, - 0x65, 0x0a, 0xaf, 0xd5, 0x9b, 0x2d, 0xbe, 0x4c, 0xe2, 0xd7, 0x12, 0xa4, 0x9e, 0x12, 0xcf, 0xec, - 0x9a, 0x9e, 0x89, 0xb6, 0x21, 0xdd, 0x1b, 0x0d, 0x3b, 0x2d, 0xb3, 0xdb, 0x1d, 0x11, 0xd7, 0x65, - 0x82, 0x57, 0x0c, 0x85, 0xda, 0x4a, 0xdc, 0x84, 0xee, 0x43, 0x8e, 0x41, 0x7a, 0xa6, 0x47, 0xbe, - 0x33, 0x27, 0x01, 0x34, 0xc6, 0xa0, 0x88, 0xfa, 0x0e, 0xb9, 0xcb, 0x8f, 0xd8, 0x86, 0x74, 0xdf, - 0xf3, 0x86, 0x01, 0x32, 0xce, 0x49, 0xa9, 0x4d, 0x40, 0xf0, 0x1b, 0x09, 0x12, 0xb4, 0x1c, 0x28, - 0x0b, 0x31, 0xab, 0x2b, 0xb6, 0x8d, 0x59, 0x5d, 0x1a, 0xdb, 0xb6, 0xec, 0xee, 0xcc, 0x2e, 0x0a, - 0xb5, 0xf9, 0xf4, 0xef, 0xf9, 0xd5, 0x8d, 0xb3, 0xea, 0x5e, 0x0b, 0x55, 0x37, 0x52, 0x4a, 0x74, - 0x0f, 0x52, 0xe7, 0x22, 0xd1, 0x1b, 0x89, 0x3b, 0xd2, 0x8e, 0xb2, 0xbb, 0x2a, 0xb0, 0x7e, 0xfe, - 0x46, 0x00, 0xc0, 0xc7, 0x57, 0xd6, 0x3d, 0x0d, 0xa9, 0x2f, 0xeb, 0xd5, 0x6a, 0xfd, 0x85, 0x6e, - 0xf0, 0xc2, 0x1f, 0x94, 0x6a, 0xe5, 0x4a, 0xb9, 0xd4, 0xd4, 0xd5, 0x18, 0x02, 0x48, 0x56, 0xf5, - 0x52, 0x59, 0x37, 0xd4, 0x38, 0x05, 0x36, 0x8e, 0x4e, 0x9a, 0x65, 0x1a, 0x96, 0xc0, 0x3f, 0x49, - 0xb0, 0x7c, 0x30, 0x18, 0xbb, 0x1e, 0x19, 0xa1, 0x22, 0xc8, 0xb6, 0xd3, 0x25, 0xb4, 0xb6, 0xf1, - 0x1d, 0x65, 0xf7, 0xa6, 0x90, 0x20, 0xdc, 0x4c, 0xb6, 0xab, 0xdb, 0xde, 0x68, 0x62, 0x70, 0x9c, - 0xa6, 0x03, 0x4c, 0x8d, 0x48, 0x85, 0xf8, 0x19, 0x99, 0x88, 0x0a, 0xd1, 0x9f, 0x68, 0x1b, 0xe4, - 0x0b, 0x73, 0x30, 0x26, 0xac, 0x36, 0xca, 0xae, 0x12, 0xca, 0xdf, 0xe0, 0x9e, 0xbd, 0xd8, 0xa7, - 0x12, 0x7e, 0x00, 0x2a, 0x35, 0x55, 0xec, 0x53, 0x27, 0x68, 0xcc, 0xdb, 0x90, 0xa0, 0x7b, 0x30, - 0xb6, 0x99, 0x48, 0xe6, 0xc0, 0x0f, 0x01, 0x09, 0x61, 0x4f, 0x1c, 0xcb, 0xf6, 0x8f, 0xda, 0x5b, - 0xc3, 0xee, 0xc2, 0x75, 0x11, 0x56, 0x25, 0xe6, 0x05, 0xf1, 0xe3, 0x66, 0x3e, 0x2e, 0xfe, 0x22, - 0x80, 0x45, 
0x54, 0xed, 0xc0, 0x72, 0x87, 0x9b, 0xc5, 0x0e, 0xd9, 0x68, 0x8d, 0x0c, 0xdf, 0x8d, - 0xff, 0x90, 0x20, 0x27, 0x8c, 0x2f, 0x4c, 0xaf, 0xd3, 0x0f, 0x28, 0x3e, 0x01, 0x99, 0x5c, 0x10, - 0xdb, 0x13, 0x27, 0x6e, 0x3b, 0x4a, 0x10, 0xc1, 0x16, 0x74, 0x0a, 0x34, 0x38, 0x3e, 0x48, 0x2d, - 0x36, 0x27, 0xb5, 0xb0, 0xb8, 0xf8, 0x62, 0x71, 0x0f, 0x41, 0x66, 0xd4, 0xd1, 0x0e, 0x4a, 0x41, - 0xe2, 0x49, 0xbd, 0x52, 0x53, 0x25, 0x7a, 0x24, 0xab, 0x7a, 0xe9, 0xb9, 0xe8, 0x9c, 0x93, 0x67, - 0xac, 0x8b, 0xe2, 0x78, 0x13, 0xe0, 0x90, 0x78, 0xf3, 0x4a, 0xf6, 0x18, 0x14, 0xe6, 0x15, 0x79, - 0x7e, 0x00, 0xc9, 0x53, 0x8b, 0x0c, 0xba, 0xae, 0xa8, 0x54, 0xae, 0xc0, 0xaf, 0xcf, 0x82, 0x7f, - 0xb7, 0x16, 0x4a, 0xf6, 0xc4, 0x10, 0x18, 0x5c, 0x85, 0x74, 0x85, 0x6a, 0x9d, 0x43, 0x1e, 0x62, - 0x8b, 0xfd, 0x03, 0xb6, 0xdb, 0x90, 0x29, 0x93, 0x01, 0xf1, 0xe6, 0x7e, 0xde, 0x23, 0x48, 0x95, - 0x9d, 0xce, 0xf8, 0x9c, 0xd6, 0xe0, 0xbf, 0x6d, 0x55, 0x02, 0x75, 0x7f, 0x3c, 0x38, 0x8b, 0x88, - 0xff, 0x10, 0x56, 0xba, 0x82, 0xdd, 0x3f, 0x4b, 0xfe, 0x71, 0xf6, 0x77, 0x35, 0xa6, 0x08, 0xfc, - 0x3e, 0x5c, 0x0b, 0x51, 0x88, 0xf2, 0xe5, 0x40, 0xee, 0x38, 0x63, 0xd1, 0x26, 0xb2, 0xc1, 0x17, - 0xf8, 0x2e, 0x87, 0x46, 0x93, 0x53, 0x21, 0x6e, 0x75, 0xf9, 0x46, 0x2b, 0x06, 0xfd, 0x89, 0xf3, - 0x80, 0xc2, 0xb0, 0x85, 0x94, 0x55, 0xc8, 0x34, 0x88, 0x39, 0xa2, 0x5d, 0xc7, 0xe9, 0x1e, 0x43, - 0xd6, 0x65, 0x86, 0xd6, 0x88, 0x5b, 0x16, 0x7e, 0xc0, 0x8c, 0x1b, 0x0e, 0xc6, 0xc7, 0x90, 0xf5, - 0xd9, 0xc4, 0xae, 0x8f, 0x20, 0x13, 0xd0, 0xb9, 0xe3, 0xc1, 0x62, 0xb6, 0xb4, 0xcf, 0x46, 0x91, - 0xf8, 0x17, 0x09, 0x14, 0x56, 0x95, 0x03, 0xc7, 0x3e, 0xb5, 0x7a, 0x94, 0x8a, 0x55, 0xb1, 0x75, - 0x6e, 0x0e, 0x87, 0x96, 0xdd, 0x5b, 0x4c, 0xc5, 0xa0, 0x4f, 0x39, 0x12, 0x6d, 0x01, 0xf0, 0x50, - 0x6f, 0x32, 0x24, 0xe2, 0xaa, 0x5e, 0x61, 0x96, 0xe6, 0x64, 0x48, 0x9b, 0x15, 0x71, 0xb7, 0xeb, - 0x39, 0x23, 0xb3, 0x47, 0x38, 0x8c, 0x4f, 0x03, 0x95, 0x79, 0x1a, 0xdc, 0x41, 0xd1, 0xb8, 0x0e, - 0xeb, 0x87, 0xc4, 0x0b, 0x29, 0x0b, 0x92, 0x7d, 
0x08, 0x7c, 0xdb, 0x56, 0x87, 0xd9, 0x85, 0x40, - 0x24, 0x3e, 0x7e, 0x38, 0x42, 0xb1, 0xa6, 0x0b, 0x5c, 0x83, 0x35, 0x9f, 0x90, 0xde, 0xec, 0x6e, - 0x88, 0x4f, 0xf1, 0x75, 0x99, 0xde, 0xe2, 0x93, 0x04, 0x56, 0x10, 0x8e, 0x7f, 0x8f, 0x41, 0xea, - 0xd9, 0xc8, 0x19, 0x3a, 0xae, 0x39, 0x40, 0xf7, 0xa2, 0x17, 0xce, 0x9a, 0x10, 0xe3, 0xfb, 0xff, - 0xe5, 0x25, 0x73, 0x0f, 0x52, 0x7e, 0xe7, 0x8a, 0x5b, 0xe6, 0x52, 0x6b, 0x07, 0x00, 0x71, 0xb4, - 0x12, 0xc1, 0xd1, 0x8a, 0x1c, 0x0c, 0xf9, 0x6d, 0x07, 0xc3, 0x6f, 0xec, 0xe4, 0xb4, 0xb1, 0xcf, - 0xae, 0xbc, 0xb8, 0xe8, 0x44, 0xd3, 0x9b, 0xad, 0x5a, 0xbd, 0xac, 0xab, 0x12, 0x7d, 0x1c, 0x95, - 0xf5, 0xaa, 0xde, 0xd4, 0xb9, 0x81, 0xbd, 0x3a, 0x2a, 0xb5, 0xb2, 0xfe, 0x95, 0x1a, 0xa7, 0xb7, - 0x19, 0xf7, 0xa9, 0x09, 0x94, 0x05, 0xd8, 0x3f, 0xa9, 0x1e, 0xb7, 0xb8, 0x4f, 0xa6, 0x71, 0x6c, - 0x2d, 0x00, 0xc9, 0xdd, 0x37, 0x34, 0x90, 0x8a, 0x43, 0x36, 0xac, 0xce, 0xbc, 0x88, 0xd0, 0xd6, - 0xc2, 0x47, 0x9c, 0xf6, 0xbf, 0xc5, 0x0f, 0x29, 0xbc, 0xf9, 0xfa, 0xcf, 0xbf, 0x7e, 0x8e, 0xad, - 0xa3, 0x5c, 0xf1, 0xe2, 0xa3, 0x22, 0x2d, 0x6c, 0xb1, 0xcf, 0x50, 0x1d, 0x46, 0xde, 0x84, 0x94, - 0x3f, 0x10, 0xd1, 0xfa, 0xa5, 0xaf, 0xad, 0xd3, 0x37, 0xa9, 0xb6, 0x11, 0xda, 0x21, 0x3c, 0xa3, - 0xf0, 0x06, 0xa3, 0xbe, 0x86, 0x56, 0x03, 0x6a, 0xda, 0x3c, 0x63, 0x17, 0xed, 0x83, 0x12, 0x9a, - 0x98, 0x68, 0x66, 0xbc, 0x87, 0xa6, 0xa8, 0x36, 0x67, 0x4f, 0xbc, 0x84, 0xca, 0x90, 0x0e, 0x8f, - 0x4f, 0xa4, 0x45, 0x49, 0xc2, 0x33, 0x75, 0x01, 0xcb, 0xd7, 0x81, 0x92, 0x85, 0x29, 0xce, 0x90, - 0x47, 0xb2, 0xd4, 0x58, 0x96, 0x39, 0x84, 0x68, 0x96, 0x62, 0xae, 0xf9, 0x89, 0x1e, 0x06, 0x22, - 0xd9, 0x38, 0x9d, 0xcb, 0x7f, 0x6b, 0xc1, 0xec, 0xc5, 0x4b, 0xf7, 0x25, 0x74, 0x0c, 0xf1, 0x43, - 0xe2, 0x21, 0xff, 0xdd, 0x36, 0x1d, 0x7e, 0x1a, 0x0a, 0x9b, 0x44, 0xc4, 0x16, 0x93, 0xb4, 0x81, - 0xd6, 0xa8, 0xa4, 0xa0, 0x8b, 0x8b, 0x3f, 0x58, 0xdd, 0xcf, 0xf3, 0xf9, 0x1f, 0xd1, 0x37, 0x7e, - 0x37, 0x5d, 0x0f, 0x5f, 0x07, 0x6f, 0x2b, 0xd6, 0xc7, 0x8c, 0xb4, 0xa0, 0x65, 0x22, 
0xa4, 0x7b, - 0x52, 0xfe, 0x95, 0xa6, 0x5d, 0xbd, 0xd1, 0x9e, 0x94, 0x47, 0x27, 0x90, 0xe4, 0x97, 0x3f, 0xca, - 0xf9, 0xe7, 0x2b, 0x3c, 0x32, 0xe6, 0xee, 0x26, 0x52, 0xc8, 0xcf, 0x49, 0xa1, 0x01, 0x2b, 0xc1, - 0xa4, 0x42, 0x7e, 0x03, 0xce, 0x8e, 0x3f, 0xed, 0xc6, 0x65, 0x87, 0xa8, 0xd0, 0x75, 0x46, 0x9f, - 0xd1, 0x52, 0x94, 0xbe, 0x3d, 0x1e, 0x9c, 0x51, 0xad, 0xcf, 0x01, 0xa6, 0xc3, 0x0a, 0x85, 0x83, - 0xa3, 0x9a, 0x6f, 0x5e, 0xe1, 0x89, 0xf2, 0xe6, 0x23, 0xbc, 0x55, 0x48, 0xf2, 0x51, 0x14, 0xd4, - 0x20, 0x32, 0xe7, 0xb4, 0xb5, 0x19, 0xab, 0xe0, 0x5a, 0x63, 0x5c, 0xab, 0x18, 0x28, 0x17, 0x1f, - 0x47, 0x94, 0xad, 0x02, 0xd9, 0xe8, 0x9d, 0x3f, 0xb7, 0xab, 0xb6, 0xa6, 0xad, 0x71, 0xc5, 0x88, - 0xc0, 0x4b, 0xe8, 0x10, 0x32, 0x91, 0xdb, 0x7e, 0x2e, 0xd3, 0xe6, 0x0c, 0x53, 0x64, 0x36, 0xe0, - 0x25, 0xf4, 0x19, 0xa4, 0x1a, 0xb6, 0x39, 0x74, 0xfb, 0x8e, 0x37, 0x97, 0x63, 0xee, 0x21, 0xdc, - 0xdf, 0x79, 0xf5, 0x6e, 0xcf, 0xf2, 0xfa, 0xe3, 0x76, 0xa1, 0xe3, 0x9c, 0x17, 0xcf, 0x1d, 0x77, - 0x7c, 0x66, 0x16, 0xdb, 0x03, 0xd3, 0xf5, 0x8a, 0xd1, 0xff, 0xa1, 0xdb, 0x49, 0xb6, 0x7e, 0xf0, - 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x30, 0x9b, 0x20, 0x5c, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IndexClient is the client API for Index service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type IndexClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) - ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) - ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) - BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) - GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) - GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) -} - -type indexClient struct { - cc *grpc.ClientConn -} - -func NewIndexClient(cc *grpc.ClientConn) IndexClient { - return &indexClient{cc} -} - -func (c *indexClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, "/index.Index/NodeHealthCheck", 
in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { - out := new(NodeInfoResponse) - err := c.cc.Invoke(ctx, "/index.Index/NodeInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/ClusterJoin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/ClusterLeave", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { - out := new(ClusterInfoResponse) - err := c.cc.Invoke(ctx, "/index.Index/ClusterInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/ClusterWatch", opts...) 
- if err != nil { - return nil, err - } - x := &indexClusterWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Index_ClusterWatchClient interface { - Recv() (*ClusterWatchResponse, error) - grpc.ClientStream -} - -type indexClusterWatchClient struct { - grpc.ClientStream -} - -func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { - m := new(ClusterWatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *indexClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/index.Index/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Index(ctx context.Context, in *IndexRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Index", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) BulkIndex(ctx context.Context, in *BulkIndexRequest, opts ...grpc.CallOption) (*BulkIndexResponse, error) { - out := new(BulkIndexResponse) - err := c.cc.Invoke(ctx, "/index.Index/BulkIndex", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) BulkDelete(ctx context.Context, in *BulkDeleteRequest, opts ...grpc.CallOption) (*BulkDeleteResponse, error) { - out := new(BulkDeleteResponse) - err := c.cc.Invoke(ctx, "/index.Index/BulkDelete", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/index.Index/Search", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) GetIndexConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexConfigResponse, error) { - out := new(GetIndexConfigResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetIndexConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) GetIndexStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetIndexStatsResponse, error) { - out := new(GetIndexStatsResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetIndexStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IndexServer is the server API for Index service. 
-type IndexServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) - ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) - ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) - ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) - ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error - Get(context.Context, *GetRequest) (*GetResponse, error) - Index(context.Context, *IndexRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - BulkIndex(context.Context, *BulkIndexRequest) (*BulkIndexResponse, error) - BulkDelete(context.Context, *BulkDeleteRequest) (*BulkDeleteResponse, error) - Search(context.Context, *SearchRequest) (*SearchResponse, error) - GetIndexConfig(context.Context, *empty.Empty) (*GetIndexConfigResponse, error) - GetIndexStats(context.Context, *empty.Empty) (*GetIndexStatsResponse, error) - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) -} - -// UnimplementedIndexServer can be embedded to have forward compatible implementations. 
-type UnimplementedIndexServer struct { -} - -func (*UnimplementedIndexServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedIndexServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") -} -func (*UnimplementedIndexServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") -} -func (*UnimplementedIndexServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") -} -func (*UnimplementedIndexServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") -} -func (*UnimplementedIndexServer) ClusterWatch(req *empty.Empty, srv Index_ClusterWatchServer) error { - return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") -} -func (*UnimplementedIndexServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedIndexServer) Index(ctx context.Context, req *IndexRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Index not implemented") -} -func (*UnimplementedIndexServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedIndexServer) BulkIndex(ctx context.Context, req *BulkIndexRequest) (*BulkIndexResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method BulkIndex not implemented") -} -func (*UnimplementedIndexServer) BulkDelete(ctx context.Context, req *BulkDeleteRequest) (*BulkDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BulkDelete not implemented") -} -func (*UnimplementedIndexServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") -} -func (*UnimplementedIndexServer) GetIndexConfig(ctx context.Context, req *empty.Empty) (*GetIndexConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIndexConfig not implemented") -} -func (*UnimplementedIndexServer) GetIndexStats(ctx context.Context, req *empty.Empty) (*GetIndexStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIndexStats not implemented") -} -func (*UnimplementedIndexServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} - -func RegisterIndexServer(s *grpc.Server, srv IndexServer) { - s.RegisterService(&_Index_serviceDesc, srv) -} - -func _Index_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).NodeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/NodeInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).NodeInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterJoinRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).ClusterJoin(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterJoin", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterLeaveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).ClusterLeave(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterLeave", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } 
- if interceptor == nil { - return srv.(IndexServer).ClusterInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/ClusterInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ClusterInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IndexServer).ClusterWatch(m, &indexClusterWatchServer{stream}) -} - -type Index_ClusterWatchServer interface { - Send(*ClusterWatchResponse) error - grpc.ServerStream -} - -type indexClusterWatchServer struct { - grpc.ServerStream -} - -func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Index_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Index_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Index(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Index", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Index(ctx, 
req.(*IndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_BulkIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkIndexRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).BulkIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/BulkIndex", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).BulkIndex(ctx, req.(*BulkIndexRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_BulkDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BulkDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).BulkDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/BulkDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).BulkDelete(ctx, req.(*BulkDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Search(ctx, req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetIndexConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).GetIndexConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/GetIndexConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetIndexConfig(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetIndexStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).GetIndexStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/GetIndexStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetIndexStats(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } 
- if interceptor == nil { - return srv.(IndexServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Index_serviceDesc = grpc.ServiceDesc{ - ServiceName: "index.Index", - HandlerType: (*IndexServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Index_NodeHealthCheck_Handler, - }, - { - MethodName: "NodeInfo", - Handler: _Index_NodeInfo_Handler, - }, - { - MethodName: "ClusterJoin", - Handler: _Index_ClusterJoin_Handler, - }, - { - MethodName: "ClusterLeave", - Handler: _Index_ClusterLeave_Handler, - }, - { - MethodName: "ClusterInfo", - Handler: _Index_ClusterInfo_Handler, - }, - { - MethodName: "Get", - Handler: _Index_Get_Handler, - }, - { - MethodName: "Index", - Handler: _Index_Index_Handler, - }, - { - MethodName: "Delete", - Handler: _Index_Delete_Handler, - }, - { - MethodName: "BulkIndex", - Handler: _Index_BulkIndex_Handler, - }, - { - MethodName: "BulkDelete", - Handler: _Index_BulkDelete_Handler, - }, - { - MethodName: "Search", - Handler: _Index_Search_Handler, - }, - { - MethodName: "GetIndexConfig", - Handler: _Index_GetIndexConfig_Handler, - }, - { - MethodName: "GetIndexStats", - Handler: _Index_GetIndexStats_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Index_Snapshot_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ClusterWatch", - Handler: _Index_ClusterWatch_Handler, - ServerStreams: true, - }, - }, - Metadata: "protobuf/index/index.proto", -} diff --git a/protobuf/index/index.pb.gw.go b/protobuf/index/index.pb.gw.go deleted file mode 100644 index a54291a..0000000 --- a/protobuf/index/index.pb.gw.go +++ /dev/null @@ -1,510 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
-// source: protobuf/index/index.proto - -/* -Package index is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package index - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/empty" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Index_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Index_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Index_NodeHealthCheck_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_ClusterInfo_0(ctx context.Context, marshaler 
runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Get_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Index_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Index_1(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq IndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Index(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["id"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") - } - - protoReq.Id, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_BulkIndex_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var 
protoReq BulkIndexRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkIndex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_BulkDelete_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq BulkDeleteRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.BulkDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Index_Search_0(ctx context.Context, marshaler runtime.Marshaler, client IndexClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SearchRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Search(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - 
return msg, metadata, err - -} - -// RegisterIndexHandlerFromEndpoint is same as RegisterIndexHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterIndexHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterIndexHandler(ctx, mux, conn) -} - -// RegisterIndexHandler registers the http handlers for service Index to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterIndexHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterIndexHandlerClient(ctx, mux, NewIndexClient(conn)) -} - -// RegisterIndexHandlerClient registers the http handlers for service Index -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "IndexClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "IndexClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "IndexClient" to call the correct interceptors. 
-func RegisterIndexHandlerClient(ctx context.Context, mux *runtime.ServeMux, client IndexClient) error { - - mux.Handle("GET", pattern_Index_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Index_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Index_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Index_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Index_Index_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Index_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Index_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Index_Index_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Index_1(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Index_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("DELETE", pattern_Index_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("PUT", pattern_Index_BulkIndex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_BulkIndex_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_BulkIndex_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("DELETE", pattern_Index_BulkDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_BulkDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_BulkDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Index_Search_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Index_Search_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Index_Search_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Index_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Index_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "documents"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Index_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "documents", "id"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_BulkIndex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_BulkDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "bulk"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Index_Search_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "search"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Index_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Index_NodeInfo_0 = runtime.ForwardResponseMessage - - forward_Index_ClusterInfo_0 = runtime.ForwardResponseMessage - - forward_Index_Get_0 = runtime.ForwardResponseMessage - - 
forward_Index_Index_0 = runtime.ForwardResponseMessage - - forward_Index_Index_1 = runtime.ForwardResponseMessage - - forward_Index_Delete_0 = runtime.ForwardResponseMessage - - forward_Index_BulkIndex_0 = runtime.ForwardResponseMessage - - forward_Index_BulkDelete_0 = runtime.ForwardResponseMessage - - forward_Index_Search_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto deleted file mode 100644 index 5dee6a8..0000000 --- a/protobuf/index/index.proto +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; - -package index; - -option go_package = "github.com/mosuka/blast/protobuf/index"; - -service Index { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { - option (google.api.http) = { - get: "/v1/node/status" - }; - } - - rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} - rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { - option (google.api.http) = { - get: "/v1/cluster/status" - }; - } - rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/documents/{id=**}" - }; - } - rpc Index (IndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/documents" - body: "*" - additional_bindings { - put: "/v1/documents/{id=**}" - body: "*" - } - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/documents/{id=**}" - }; - } - rpc BulkIndex (BulkIndexRequest) returns (BulkIndexResponse) { - option (google.api.http) = { - put: "/v1/bulk" - body: "*" - }; - } - rpc BulkDelete (BulkDeleteRequest) returns (BulkDeleteResponse) { - option (google.api.http) = { - delete: "/v1/bulk" - body: "*" - }; - } - rpc Search (SearchRequest) returns (SearchResponse) { - option (google.api.http) = { - post: "/v1/search" - body: "*" - }; - } - rpc GetIndexConfig (google.protobuf.Empty) returns (GetIndexConfigResponse) {} - rpc GetIndexStats (google.protobuf.Empty) returns (GetIndexStatsResponse) {} - rpc Snapshot (google.protobuf.Empty) returns 
(google.protobuf.Empty) {} -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message Metadata { - string grpc_address = 1; - string grpc_gateway_address = 2; - string http_address = 3; -} - -message Node { - enum State { - UNKNOWN = 0; - FOLLOWER = 1; - CANDIDATE = 2; - LEADER = 3; - SHUTDOWN = 4; - } - string id = 1; - string bind_address = 2; - State state = 3; - Metadata metadata = 4; -} - -message Cluster { - map nodes = 1; -} - -message NodeInfoResponse { - Node node = 1; -} - -message ClusterJoinRequest { - Node node = 1; -} - -message ClusterLeaveRequest { - string id = 1; -} - -message ClusterInfoResponse { - Cluster cluster = 1; -} - -message ClusterWatchResponse { - enum Event { - UNKNOWN = 0; - JOIN = 1; - LEAVE = 2; - UPDATE = 3; - } - Event event = 1; - Node node = 2; - Cluster cluster = 3; -} - -message GetRequest { - string id = 1; -} - -message GetResponse { -// Document document = 1; - google.protobuf.Any fields = 1; -} - -message IndexRequest { - string id = 1; - google.protobuf.Any fields = 2; -} - -message DeleteRequest { - string id = 1; -} - -message Document { - string id = 1; - google.protobuf.Any fields = 2; -} - -message BulkIndexRequest { - repeated Document documents = 1; -} - -message BulkIndexResponse { - int32 count = 1; -} - -message BulkDeleteRequest { - repeated string ids = 1; -} - -message BulkDeleteResponse { - int32 count = 1; -} - -message SearchRequest { - google.protobuf.Any search_request = 1; -} - -message SearchResponse { - google.protobuf.Any search_result = 1; -} - -message IndexConfig { - google.protobuf.Any index_mapping = 1; - string index_type = 2; - string index_storage_type = 3; -} - -message GetIndexConfigResponse { - IndexConfig 
index_config = 1; -} - -message GetIndexStatsResponse { - google.protobuf.Any index_stats = 1; -} - -message Proposal { - enum Event { - UNKNOWN = 0; - SET_NODE = 1; - DELETE_NODE = 2; - INDEX = 3; - DELETE = 4; - BULK_INDEX = 5; - BULK_DELETE = 6; - } - Event event = 1; - Node node = 2; - Document document = 3; - string id = 4; - repeated Document documents = 5; - repeated string ids = 6; -} diff --git a/protobuf/index/index.swagger.json b/protobuf/index/index.swagger.json deleted file mode 100644 index 5d96593..0000000 --- a/protobuf/index/index.swagger.json +++ /dev/null @@ -1,557 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/index/index.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/bulk": { - "delete": { - "operationId": "BulkDelete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexBulkDeleteResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexBulkDeleteRequest" - } - } - ], - "tags": [ - "Index" - ] - }, - "put": { - "operationId": "BulkIndex", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexBulkIndexResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexBulkIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/cluster/status": { - "get": { - "operationId": "ClusterInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexClusterInfoResponse" - } - } - }, - "tags": [ - "Index" - ] - } - }, - "/v1/documents": { - "put": { - "operationId": "Index", - "responses": { - "200": { - "description": "A successful response.", - 
"schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/documents/{id}": { - "get": { - "operationId": "Get", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexGetResponse" - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Index" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Index" - ] - }, - "put": { - "operationId": "Index2", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexIndexRequest" - } - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Index" - ] - } - }, - "/v1/node/status": { - "get": { - "operationId": "NodeInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexNodeInfoResponse" - } - } - }, - "tags": [ - "Index" - ] - } - }, - "/v1/search": { 
- "post": { - "operationId": "Search", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/indexSearchResponse" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/indexSearchRequest" - } - } - ], - "tags": [ - "Index" - ] - } - } - }, - "definitions": { - "NodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "indexBulkDeleteRequest": { - "type": "object", - "properties": { - "ids": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "indexBulkDeleteResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "indexBulkIndexRequest": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/definitions/indexDocument" - } - } - } - }, - "indexBulkIndexResponse": { - "type": "object", - "properties": { - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "indexCluster": { - "type": "object", - "properties": { - "nodes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/indexNode" - } - } - } - }, - "indexClusterInfoResponse": { - "type": "object", - "properties": { - "cluster": { - "$ref": "#/definitions/indexCluster" - } - } - }, - "indexClusterWatchResponse": { - "type": "object", - "properties": { - "event": { - "$ref": "#/definitions/indexClusterWatchResponseEvent" - }, - "node": { - "$ref": "#/definitions/indexNode" - }, - "cluster": { - "$ref": "#/definitions/indexCluster" - } - } - }, - "indexClusterWatchResponseEvent": { - "type": "string", - "enum": [ - "UNKNOWN", - "JOIN", - "LEAVE", - "UPDATE" - ], - "default": "UNKNOWN" - }, - "indexDocument": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": 
"#/definitions/protobufAny" - } - } - }, - "indexGetIndexConfigResponse": { - "type": "object", - "properties": { - "index_config": { - "$ref": "#/definitions/indexIndexConfig" - } - } - }, - "indexGetIndexStatsResponse": { - "type": "object", - "properties": { - "index_stats": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexGetResponse": { - "type": "object", - "properties": { - "fields": { - "$ref": "#/definitions/protobufAny", - "title": "Document document = 1;" - } - } - }, - "indexIndexConfig": { - "type": "object", - "properties": { - "index_mapping": { - "$ref": "#/definitions/protobufAny" - }, - "index_type": { - "type": "string" - }, - "index_storage_type": { - "type": "string" - } - } - }, - "indexIndexRequest": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "fields": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexMetadata": { - "type": "object", - "properties": { - "grpc_address": { - "type": "string" - }, - "grpc_gateway_address": { - "type": "string" - }, - "http_address": { - "type": "string" - } - } - }, - "indexNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "bind_address": { - "type": "string" - }, - "state": { - "$ref": "#/definitions/indexNodeState" - }, - "metadata": { - "$ref": "#/definitions/indexMetadata" - } - } - }, - "indexNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/indexNodeHealthCheckResponseState" - } - } - }, - "indexNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "indexNodeInfoResponse": { - "type": "object", - "properties": { - "node": { - "$ref": "#/definitions/indexNode" - } - } - }, - "indexNodeState": { - "type": "string", - "enum": [ - "UNKNOWN", - "FOLLOWER", - "CANDIDATE", - "LEADER", - "SHUTDOWN" - ], - "default": "UNKNOWN" - }, - 
"indexSearchRequest": { - "type": "object", - "properties": { - "search_request": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "indexSearchResponse": { - "type": "object", - "properties": { - "search_result": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." 
- } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - }, - "x-stream-definitions": { - "indexClusterWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/indexClusterWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of indexClusterWatchResponse" - } - } -} diff --git a/protobuf/management/management.pb.go b/protobuf/management/management.pb.go deleted file mode 100644 index 40577fb..0000000 --- a/protobuf/management/management.pb.go +++ /dev/null @@ -1,1649 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: protobuf/management/management.proto - -package management - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type NodeHealthCheckRequest_Probe int32 - -const ( - NodeHealthCheckRequest_UNKNOWN NodeHealthCheckRequest_Probe = 0 - NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 1 - NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 2 - NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 3 -) - -var NodeHealthCheckRequest_Probe_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHINESS", - 2: "LIVENESS", - 3: "READINESS", -} - -var NodeHealthCheckRequest_Probe_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHINESS": 1, - "LIVENESS": 2, - "READINESS": 3, -} - -func (x NodeHealthCheckRequest_Probe) String() string { - return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) -} - -func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{0, 0} -} - -type NodeHealthCheckResponse_State int32 - -const ( - NodeHealthCheckResponse_UNKNOWN NodeHealthCheckResponse_State = 0 - NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 1 - NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 2 - NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 3 - NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 4 - NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 5 - NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 6 -) - -var NodeHealthCheckResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "ALIVE", - 4: "DEAD", - 5: "READY", - 6: "NOT_READY", -} - -var NodeHealthCheckResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "ALIVE": 3, - "DEAD": 4, - "READY": 5, - "NOT_READY": 6, -} - -func (x NodeHealthCheckResponse_State) String() string { - return 
proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) -} - -func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{1, 0} -} - -type Node_State int32 - -const ( - Node_UNKNOWN Node_State = 0 - Node_FOLLOWER Node_State = 1 - Node_CANDIDATE Node_State = 2 - Node_LEADER Node_State = 3 - Node_SHUTDOWN Node_State = 4 -) - -var Node_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "FOLLOWER", - 2: "CANDIDATE", - 3: "LEADER", - 4: "SHUTDOWN", -} - -var Node_State_value = map[string]int32{ - "UNKNOWN": 0, - "FOLLOWER": 1, - "CANDIDATE": 2, - "LEADER": 3, - "SHUTDOWN": 4, -} - -func (x Node_State) String() string { - return proto.EnumName(Node_State_name, int32(x)) -} - -func (Node_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{3, 0} -} - -type ClusterWatchResponse_Event int32 - -const ( - ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 - ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 - ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 - ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 -) - -var ClusterWatchResponse_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "JOIN", - 2: "LEAVE", - 3: "UPDATE", -} - -var ClusterWatchResponse_Event_value = map[string]int32{ - "UNKNOWN": 0, - "JOIN": 1, - "LEAVE": 2, - "UPDATE": 3, -} - -func (x ClusterWatchResponse_Event) String() string { - return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) -} - -func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9, 0} -} - -type WatchResponse_Command int32 - -const ( - WatchResponse_UNKNOWN WatchResponse_Command = 0 - WatchResponse_SET WatchResponse_Command = 1 - WatchResponse_DELETE WatchResponse_Command = 2 -) - -var WatchResponse_Command_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET", - 2: "DELETE", -} - -var WatchResponse_Command_value = map[string]int32{ 
- "UNKNOWN": 0, - "SET": 1, - "DELETE": 2, -} - -func (x WatchResponse_Command) String() string { - return proto.EnumName(WatchResponse_Command_name, int32(x)) -} - -func (WatchResponse_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{16, 0} -} - -type Proposal_Event int32 - -const ( - Proposal_UNKNOWN Proposal_Event = 0 - Proposal_SET_NODE Proposal_Event = 1 - Proposal_DELETE_NODE Proposal_Event = 2 - Proposal_SET_VALUE Proposal_Event = 3 - Proposal_DELETE_VALUE Proposal_Event = 4 -) - -var Proposal_Event_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SET_NODE", - 2: "DELETE_NODE", - 3: "SET_VALUE", - 4: "DELETE_VALUE", -} - -var Proposal_Event_value = map[string]int32{ - "UNKNOWN": 0, - "SET_NODE": 1, - "DELETE_NODE": 2, - "SET_VALUE": 3, - "DELETE_VALUE": 4, -} - -func (x Proposal_Event) String() string { - return proto.EnumName(Proposal_Event_name, int32(x)) -} - -func (Proposal_Event) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{17, 0} -} - -type NodeHealthCheckRequest struct { - Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=management.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } -func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckRequest) ProtoMessage() {} -func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{0} -} - -func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) -} -func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) -} -func (m 
*NodeHealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) -} -func (m *NodeHealthCheckRequest) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckRequest.Size(m) -} -func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo - -func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { - if m != nil { - return m.Probe - } - return NodeHealthCheckRequest_UNKNOWN -} - -type NodeHealthCheckResponse struct { - State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=management.NodeHealthCheckResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } -func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*NodeHealthCheckResponse) ProtoMessage() {} -func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{1} -} - -func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) -} -func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) -} -func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) -} -func (m *NodeHealthCheckResponse) XXX_Size() int { - return xxx_messageInfo_NodeHealthCheckResponse.Size(m) -} -func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo - -func (m *NodeHealthCheckResponse) 
GetState() NodeHealthCheckResponse_State { - if m != nil { - return m.State - } - return NodeHealthCheckResponse_UNKNOWN -} - -type Metadata struct { - GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` - GrpcGatewayAddress string `protobuf:"bytes,2,opt,name=grpc_gateway_address,json=grpcGatewayAddress,proto3" json:"grpc_gateway_address,omitempty"` - HttpAddress string `protobuf:"bytes,3,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{2} -} - -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetGrpcAddress() string { - if m != nil { - return m.GrpcAddress - } - return "" -} - -func (m *Metadata) GetGrpcGatewayAddress() string { - if m != nil { - return m.GrpcGatewayAddress - } - return "" -} - -func (m *Metadata) GetHttpAddress() string { - if m != nil { - return m.HttpAddress - } - return "" -} - -type Node struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - BindAddress string 
`protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` - State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=management.Node_State" json:"state,omitempty"` - Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{3} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Node) GetBindAddress() string { - if m != nil { - return m.BindAddress - } - return "" -} - -func (m *Node) GetState() Node_State { - if m != nil { - return m.State - } - return Node_UNKNOWN -} - -func (m *Node) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Cluster struct { - Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return 
proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{4} -} - -func (m *Cluster) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cluster.Unmarshal(m, b) -} -func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) -} -func (m *Cluster) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cluster.Merge(m, src) -} -func (m *Cluster) XXX_Size() int { - return xxx_messageInfo_Cluster.Size(m) -} -func (m *Cluster) XXX_DiscardUnknown() { - xxx_messageInfo_Cluster.DiscardUnknown(m) -} - -var xxx_messageInfo_Cluster proto.InternalMessageInfo - -func (m *Cluster) GetNodes() map[string]*Node { - if m != nil { - return m.Nodes - } - return nil -} - -type NodeInfoResponse struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } -func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeInfoResponse) ProtoMessage() {} -func (*NodeInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{5} -} - -func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) -} -func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) -} -func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeInfoResponse.Merge(m, src) -} -func (m *NodeInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeInfoResponse.Size(m) -} -func (m *NodeInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo - -func (m *NodeInfoResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterJoinRequest struct { - Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } -func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterJoinRequest) ProtoMessage() {} -func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{6} -} - -func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) -} -func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) -} -func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterJoinRequest.Merge(m, src) -} -func (m *ClusterJoinRequest) XXX_Size() int { - return xxx_messageInfo_ClusterJoinRequest.Size(m) -} -func (m *ClusterJoinRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo - -func (m *ClusterJoinRequest) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -type ClusterLeaveRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } -func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterLeaveRequest) ProtoMessage() {} -func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_5e030ad796566078, []int{7} -} - -func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) -} -func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) -} -func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) -} -func (m *ClusterLeaveRequest) XXX_Size() int { - return xxx_messageInfo_ClusterLeaveRequest.Size(m) -} -func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo - -func (m *ClusterLeaveRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -type ClusterInfoResponse struct { - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } -func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterInfoResponse) ProtoMessage() {} -func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{8} -} - -func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) -} -func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) -} -func (m *ClusterInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterInfoResponse.Merge(m, src) -} -func (m *ClusterInfoResponse) XXX_Size() int { - return xxx_messageInfo_ClusterInfoResponse.Size(m) -} -func (m *ClusterInfoResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo - -func (m *ClusterInfoResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type ClusterWatchResponse struct { - Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.ClusterWatchResponse_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` - Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } -func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } -func (*ClusterWatchResponse) ProtoMessage() {} -func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{9} -} - -func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) -} -func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) -} -func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWatchResponse.Merge(m, src) -} -func (m *ClusterWatchResponse) XXX_Size() int { - return xxx_messageInfo_ClusterWatchResponse.Size(m) -} -func (m *ClusterWatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo - -func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { - if m != nil { - return m.Event - } - return ClusterWatchResponse_UNKNOWN -} - -func (m *ClusterWatchResponse) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - 
-func (m *ClusterWatchResponse) GetCluster() *Cluster { - if m != nil { - return m.Cluster - } - return nil -} - -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{10} -} - -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyValue.Unmarshal(m, b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return xxx_messageInfo_KeyValue.Size(m) -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type GetRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{11} -} - -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -func (m *GetRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type GetResponse struct { - // option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { - // json_schema: { - // required: ["value"] - // }, - // example: { - // value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." } }' - // } - // }; - // google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{12} -} - -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -func (m *GetResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type SetRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetRequest) Reset() { *m = SetRequest{} } -func (m *SetRequest) String() string { return proto.CompactTextString(m) } -func (*SetRequest) ProtoMessage() {} -func (*SetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{13} -} - -func (m *SetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetRequest.Unmarshal(m, b) -} -func (m *SetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetRequest.Marshal(b, m, deterministic) -} -func (m *SetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetRequest.Merge(m, src) -} -func (m *SetRequest) XXX_Size() int { - return xxx_messageInfo_SetRequest.Size(m) -} -func (m *SetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetRequest proto.InternalMessageInfo - -func (m *SetRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *SetRequest) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type DeleteRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) 
ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{14} -} - -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -func (m *DeleteRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{15} -} - -func (m *WatchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchRequest.Unmarshal(m, b) -} -func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) -} -func (m *WatchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchRequest.Merge(m, src) -} -func (m *WatchRequest) XXX_Size() int { - return xxx_messageInfo_WatchRequest.Size(m) -} -func (m *WatchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchRequest proto.InternalMessageInfo - -func (m 
*WatchRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -type WatchResponse struct { - Command WatchResponse_Command `protobuf:"varint,1,opt,name=command,proto3,enum=management.WatchResponse_Command" json:"command,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{16} -} - -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_WatchResponse.Unmarshal(m, b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) -} -func (m *WatchResponse) XXX_Size() int { - return xxx_messageInfo_WatchResponse.Size(m) -} -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo - -func (m *WatchResponse) GetCommand() WatchResponse_Command { - if m != nil { - return m.Command - } - return WatchResponse_UNKNOWN -} - -func (m *WatchResponse) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *WatchResponse) GetValue() *any.Any { - if m != nil { - return m.Value - } - return nil -} - -type Proposal struct { - Event Proposal_Event `protobuf:"varint,1,opt,name=event,proto3,enum=management.Proposal_Event" json:"event,omitempty"` - Node *Node `protobuf:"bytes,2,opt,name=node,proto3" 
json:"node,omitempty"` - KeyValue *KeyValue `protobuf:"bytes,3,opt,name=key_value,json=keyValue,proto3" json:"key_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Proposal) Reset() { *m = Proposal{} } -func (m *Proposal) String() string { return proto.CompactTextString(m) } -func (*Proposal) ProtoMessage() {} -func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_5e030ad796566078, []int{17} -} - -func (m *Proposal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Proposal.Unmarshal(m, b) -} -func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) -} -func (m *Proposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proposal.Merge(m, src) -} -func (m *Proposal) XXX_Size() int { - return xxx_messageInfo_Proposal.Size(m) -} -func (m *Proposal) XXX_DiscardUnknown() { - xxx_messageInfo_Proposal.DiscardUnknown(m) -} - -var xxx_messageInfo_Proposal proto.InternalMessageInfo - -func (m *Proposal) GetEvent() Proposal_Event { - if m != nil { - return m.Event - } - return Proposal_UNKNOWN -} - -func (m *Proposal) GetNode() *Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *Proposal) GetKeyValue() *KeyValue { - if m != nil { - return m.KeyValue - } - return nil -} - -func init() { - proto.RegisterEnum("management.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) - proto.RegisterEnum("management.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) - proto.RegisterEnum("management.Node_State", Node_State_name, Node_State_value) - proto.RegisterEnum("management.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) - proto.RegisterEnum("management.WatchResponse_Command", WatchResponse_Command_name, 
WatchResponse_Command_value) - proto.RegisterEnum("management.Proposal_Event", Proposal_Event_name, Proposal_Event_value) - proto.RegisterType((*NodeHealthCheckRequest)(nil), "management.NodeHealthCheckRequest") - proto.RegisterType((*NodeHealthCheckResponse)(nil), "management.NodeHealthCheckResponse") - proto.RegisterType((*Metadata)(nil), "management.Metadata") - proto.RegisterType((*Node)(nil), "management.Node") - proto.RegisterType((*Cluster)(nil), "management.Cluster") - proto.RegisterMapType((map[string]*Node)(nil), "management.Cluster.NodesEntry") - proto.RegisterType((*NodeInfoResponse)(nil), "management.NodeInfoResponse") - proto.RegisterType((*ClusterJoinRequest)(nil), "management.ClusterJoinRequest") - proto.RegisterType((*ClusterLeaveRequest)(nil), "management.ClusterLeaveRequest") - proto.RegisterType((*ClusterInfoResponse)(nil), "management.ClusterInfoResponse") - proto.RegisterType((*ClusterWatchResponse)(nil), "management.ClusterWatchResponse") - proto.RegisterType((*KeyValue)(nil), "management.KeyValue") - proto.RegisterType((*GetRequest)(nil), "management.GetRequest") - proto.RegisterType((*GetResponse)(nil), "management.GetResponse") - proto.RegisterType((*SetRequest)(nil), "management.SetRequest") - proto.RegisterType((*DeleteRequest)(nil), "management.DeleteRequest") - proto.RegisterType((*WatchRequest)(nil), "management.WatchRequest") - proto.RegisterType((*WatchResponse)(nil), "management.WatchResponse") - proto.RegisterType((*Proposal)(nil), "management.Proposal") -} - -func init() { - proto.RegisterFile("protobuf/management/management.proto", fileDescriptor_5e030ad796566078) -} - -var fileDescriptor_5e030ad796566078 = []byte{ - // 1213 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x72, 0xda, 0x56, - 0x14, 0x8e, 0x10, 0x18, 0x7c, 0x20, 0xb1, 0x72, 0xcd, 0xf8, 0x87, 0x7a, 0x52, 0x5b, 0x4d, 0x33, - 0xae, 0x13, 0x83, 0xe3, 0xb6, 0x33, 0xa9, 0xfb, 0x4b, 0x8c, 0x6a, 0x63, 
0x13, 0x70, 0x05, 0xb6, - 0xc7, 0xdd, 0x78, 0x2e, 0x70, 0x03, 0x0c, 0x20, 0x51, 0x74, 0x71, 0xca, 0x74, 0xba, 0xc9, 0xb6, - 0xcb, 0x6e, 0xfb, 0x1e, 0x5d, 0xe4, 0x31, 0xfa, 0x02, 0x5d, 0x74, 0xba, 0xe9, 0x4b, 0x74, 0xee, - 0x8f, 0x64, 0x09, 0x84, 0xed, 0x76, 0x65, 0xe9, 0x9c, 0xef, 0x7c, 0xe7, 0x3b, 0xe7, 0x1e, 0x9d, - 0x8b, 0xe1, 0xf1, 0x60, 0x68, 0x53, 0xbb, 0x3e, 0x7a, 0x9d, 0xeb, 0x63, 0x0b, 0xb7, 0x48, 0x9f, - 0x58, 0xd4, 0xf7, 0x98, 0xe5, 0x6e, 0x04, 0xd7, 0x96, 0xcc, 0x6a, 0xcb, 0xb6, 0x5b, 0x3d, 0x92, - 0xf3, 0x02, 0xb1, 0x35, 0x16, 0xb0, 0xcc, 0x7b, 0x93, 0x2e, 0xd2, 0x1f, 0x50, 0xd7, 0xb9, 0x26, - 0x9d, 0x78, 0xd0, 0xc9, 0x61, 0xcb, 0xb2, 0x29, 0xa6, 0x1d, 0xdb, 0x72, 0xa4, 0xf7, 0x19, 0xff, - 0xd3, 0xd8, 0x6e, 0x11, 0x6b, 0xdb, 0x79, 0x83, 0x5b, 0x2d, 0x32, 0xcc, 0xd9, 0x03, 0x8e, 0x98, - 0x46, 0xeb, 0xbf, 0x29, 0xb0, 0x54, 0xb6, 0x9b, 0xe4, 0x90, 0xe0, 0x1e, 0x6d, 0xef, 0xb7, 0x49, - 0xa3, 0x6b, 0x92, 0x1f, 0x46, 0xc4, 0xa1, 0xe8, 0x2b, 0x88, 0x0d, 0x86, 0x76, 0x9d, 0xac, 0x28, - 0xeb, 0xca, 0xe6, 0x83, 0xdd, 0xcd, 0xac, 0xaf, 0x98, 0xf0, 0x90, 0xec, 0x09, 0xc3, 0x9b, 0x22, - 0x4c, 0x7f, 0x09, 0x31, 0xfe, 0x8e, 0x92, 0x10, 0x3f, 0x2d, 0x1f, 0x97, 0x2b, 0xe7, 0x65, 0xed, - 0x1e, 0x5a, 0x80, 0xe4, 0xa1, 0x91, 0x2f, 0xd5, 0x0e, 0x8b, 0x65, 0xa3, 0x5a, 0xd5, 0x14, 0x94, - 0x82, 0x44, 0xa9, 0x78, 0x66, 0xf0, 0xb7, 0x08, 0xba, 0x0f, 0xf3, 0xa6, 0x91, 0x2f, 0x08, 0xa7, - 0xaa, 0xbf, 0x53, 0x60, 0x79, 0x2a, 0x97, 0x33, 0xb0, 0x2d, 0x87, 0xa0, 0xaf, 0x21, 0xe6, 0x50, - 0x4c, 0x5d, 0x7d, 0x1f, 0xdd, 0xa8, 0x4f, 0xc4, 0x64, 0xab, 0x2c, 0xc0, 0x14, 0x71, 0xfa, 0x25, - 0xc4, 0xf8, 0x7b, 0x50, 0x60, 0x12, 0xe2, 0x42, 0xe0, 0x85, 0xa6, 0x30, 0x39, 0xa7, 0x65, 0xf7, - 0x35, 0x82, 0xe6, 0x21, 0x96, 0x67, 0x62, 0x35, 0x15, 0x25, 0x20, 0x5a, 0x30, 0xf2, 0x05, 0x2d, - 0xca, 0x8c, 0x4c, 0xf2, 0x85, 0x16, 0x63, 0xf0, 0x72, 0xa5, 0x76, 0x29, 0x5e, 0xe7, 0xf4, 0xb7, - 0x0a, 0x24, 0x5e, 0x11, 0x8a, 0x9b, 0x98, 0x62, 0xb4, 0x01, 0xa9, 0xd6, 0x70, 0xd0, 0xb8, 0xc4, - 0xcd, 0xe6, 
0x90, 0x38, 0x0e, 0x57, 0x3d, 0x6f, 0x26, 0x99, 0x2d, 0x2f, 0x4c, 0x68, 0x07, 0xd2, - 0x1c, 0xd2, 0xc2, 0x94, 0xbc, 0xc1, 0x63, 0x0f, 0x1a, 0xe1, 0x50, 0xc4, 0x7c, 0x07, 0xc2, 0xe5, - 0x46, 0x6c, 0x40, 0xaa, 0x4d, 0xe9, 0xc0, 0x43, 0xaa, 0x82, 0x94, 0xd9, 0x24, 0x44, 0xff, 0x5b, - 0x81, 0x28, 0x6b, 0x07, 0x7a, 0x00, 0x91, 0x4e, 0x53, 0xa6, 0x8d, 0x74, 0x9a, 0x2c, 0xb6, 0xde, - 0xb1, 0x9a, 0x13, 0x59, 0x92, 0xcc, 0xe6, 0xd2, 0x3f, 0x73, 0x5b, 0xac, 0xf2, 0x16, 0x2f, 0x4d, - 0xb6, 0x38, 0xd0, 0x4f, 0xb4, 0x03, 0x89, 0xbe, 0xac, 0x76, 0x25, 0xba, 0xae, 0x6c, 0x26, 0x77, - 0xd3, 0xfe, 0x00, 0xb7, 0x13, 0xa6, 0x87, 0xd2, 0x8f, 0x43, 0x4f, 0x20, 0x05, 0x89, 0x6f, 0x2b, - 0xa5, 0x52, 0xe5, 0xdc, 0x30, 0xc5, 0x11, 0xec, 0xe7, 0xcb, 0x85, 0x62, 0x21, 0x5f, 0x33, 0xb4, - 0x08, 0x02, 0x98, 0x2b, 0x19, 0xf9, 0x82, 0x61, 0x6a, 0x2a, 0x03, 0x56, 0x0f, 0x4f, 0x6b, 0x05, - 0x16, 0x16, 0xd5, 0x7f, 0x51, 0x20, 0xbe, 0xdf, 0x1b, 0x39, 0x94, 0x0c, 0xd1, 0x27, 0x10, 0xb3, - 0xec, 0x26, 0x61, 0x5d, 0x56, 0x37, 0x93, 0xbb, 0x8f, 0xfc, 0x3a, 0x24, 0x86, 0x17, 0xe0, 0x18, - 0x16, 0x1d, 0x8e, 0x4d, 0x01, 0xce, 0x1c, 0x01, 0x5c, 0x1b, 0x91, 0x06, 0x6a, 0x97, 0x8c, 0x65, - 0xc3, 0xd8, 0x23, 0x7a, 0x02, 0xb1, 0x2b, 0xdc, 0x1b, 0x11, 0xde, 0xaa, 0xe4, 0xae, 0x36, 0xd9, - 0x0e, 0x53, 0xb8, 0xf7, 0x22, 0x2f, 0x14, 0xfd, 0x05, 0x68, 0xcc, 0x54, 0xb4, 0x5e, 0xdb, 0xde, - 0xc4, 0x3e, 0x86, 0x28, 0x4b, 0xc4, 0x29, 0xc3, 0xc2, 0xb9, 0x57, 0xdf, 0x03, 0x24, 0x25, 0x1e, - 0xd9, 0x1d, 0xcb, 0xfd, 0x1a, 0xef, 0x16, 0xfb, 0x21, 0x2c, 0xca, 0xd8, 0x12, 0xc1, 0x57, 0xc4, - 0x0d, 0x9e, 0x38, 0x7a, 0xbd, 0xe0, 0xc1, 0x02, 0xfa, 0xb6, 0x21, 0xde, 0x10, 0x66, 0x99, 0x66, - 0x31, 0xa4, 0x6f, 0xa6, 0x8b, 0xd1, 0xff, 0x54, 0x20, 0x2d, 0x8d, 0xe7, 0x98, 0x36, 0xda, 0x1e, - 0xcf, 0x17, 0x10, 0x23, 0x57, 0xc4, 0xa2, 0xf2, 0xcb, 0x7c, 0x12, 0xc2, 0x12, 0x08, 0xc8, 0x1a, - 0x0c, 0x6d, 0x8a, 0x20, 0xaf, 0xd2, 0xc8, 0x4d, 0x95, 0xfa, 0xb5, 0xaa, 0x77, 0xd0, 0xfa, 0x29, - 0xc4, 0x78, 0x92, 0xe0, 0xa4, 0x25, 0x20, 0x7a, 
0x54, 0x29, 0x96, 0x35, 0x85, 0x7d, 0xc4, 0x25, - 0x23, 0x7f, 0x26, 0x27, 0xec, 0xf4, 0x84, 0x4f, 0x9b, 0xaa, 0x1f, 0x42, 0xe2, 0x98, 0x8c, 0xcf, - 0xd8, 0xa9, 0x86, 0xcc, 0xc3, 0x56, 0x70, 0x1e, 0xd2, 0x59, 0xb1, 0x98, 0xb3, 0xee, 0xd6, 0xce, - 0xe6, 0xad, 0xb1, 0x9c, 0x09, 0xfd, 0x11, 0xc0, 0x01, 0xa1, 0xee, 0x81, 0x4c, 0x71, 0xe9, 0x9f, - 0x41, 0x92, 0xfb, 0x65, 0x0b, 0x3d, 0x6a, 0xe5, 0x76, 0xea, 0x23, 0x80, 0xea, 0x0d, 0xd4, 0xff, - 0x49, 0xe6, 0x06, 0xdc, 0x2f, 0x90, 0x1e, 0xa1, 0x64, 0xb6, 0xd2, 0x75, 0x48, 0xc9, 0xd3, 0x9b, - 0x85, 0xf8, 0x5d, 0x81, 0xfb, 0xc1, 0x89, 0xf8, 0x1c, 0xe2, 0x0d, 0xbb, 0xdf, 0xc7, 0x56, 0x53, - 0xce, 0xc4, 0x86, 0xff, 0xb4, 0x82, 0xc3, 0xb0, 0x2f, 0x80, 0xa6, 0x1b, 0xe1, 0x26, 0x88, 0x84, - 0x54, 0xa4, 0xde, 0x5e, 0xd1, 0x53, 0x88, 0x4b, 0xc6, 0xe0, 0xd9, 0xc7, 0x41, 0xad, 0x1a, 0x35, - 0x4d, 0x61, 0xe7, 0x5d, 0x30, 0x4a, 0x06, 0xdb, 0x2e, 0xfa, 0x3f, 0x0a, 0x24, 0x4e, 0x86, 0xf6, - 0xc0, 0x76, 0x70, 0x0f, 0xed, 0x04, 0xc7, 0x38, 0xe3, 0x97, 0xec, 0x82, 0xfe, 0xcf, 0xe8, 0x3e, - 0x87, 0xf9, 0x2e, 0x19, 0x5f, 0x06, 0x2b, 0xf0, 0x41, 0xdd, 0x89, 0x33, 0x13, 0x5d, 0xf9, 0xa4, - 0xd7, 0x42, 0xc7, 0x97, 0xed, 0x3f, 0xa3, 0x76, 0x59, 0xae, 0x14, 0x0c, 0x4d, 0x61, 0x37, 0xab, - 0xa8, 0x43, 0x18, 0xf8, 0x5d, 0xca, 0xdc, 0x67, 0xf9, 0xd2, 0x29, 0xbb, 0xb1, 0x34, 0x48, 0x49, - 0xbf, 0xb0, 0x44, 0x77, 0xdf, 0xc5, 0x01, 0x5e, 0x79, 0x79, 0xd1, 0x8f, 0xb0, 0x30, 0x71, 0x6f, - 0x22, 0xfd, 0xf6, 0x4b, 0x3f, 0xf3, 0xc1, 0x1d, 0x2e, 0x5e, 0x7d, 0xed, 0xed, 0x1f, 0x7f, 0xfd, - 0x1a, 0x59, 0x42, 0xe9, 0xdc, 0xd5, 0xf3, 0x1c, 0xeb, 0x42, 0xae, 0xcd, 0x51, 0x0d, 0x9e, 0xe6, - 0x02, 0x12, 0xee, 0xb2, 0x44, 0x4b, 0x53, 0x87, 0x69, 0xb0, 0xdf, 0x3e, 0x99, 0xb5, 0xc9, 0x34, - 0xfe, 0xd5, 0xa5, 0x2f, 0x73, 0xfe, 0x87, 0x68, 0xc1, 0xe3, 0x67, 0x77, 0xd2, 0xc8, 0x41, 0x07, - 0x90, 0xf4, 0x6d, 0x53, 0x14, 0x76, 0x13, 0xf8, 0xd6, 0x6c, 0x66, 0x46, 0x76, 0xfd, 0x1e, 0x2a, - 0x42, 0xca, 0xbf, 0x5a, 0xd1, 0xfb, 0x21, 0x4c, 0xfe, 0xa5, 0x7b, 0x03, 0x55, 0xdd, 
0xd3, 0x74, - 0x63, 0xc5, 0x61, 0x19, 0x02, 0x45, 0x67, 0x78, 0xd1, 0x69, 0x84, 0x58, 0xd1, 0x72, 0xd3, 0xb9, - 0x75, 0x97, 0x3c, 0xb9, 0xfc, 0xeb, 0x9a, 0x99, 0x64, 0xfd, 0xb6, 0xe5, 0xac, 0xdf, 0xdb, 0x51, - 0xd0, 0x77, 0xa0, 0x1e, 0x10, 0x8a, 0x02, 0x3f, 0x00, 0xae, 0xd7, 0x59, 0x66, 0x79, 0xca, 0x2e, - 0x63, 0x57, 0xb9, 0xc2, 0x45, 0xf4, 0x90, 0x29, 0x64, 0x57, 0x7e, 0xee, 0xa7, 0x2e, 0x19, 0x7f, - 0xb9, 0xb5, 0xf5, 0x33, 0xaa, 0x82, 0x5a, 0x9d, 0xa4, 0xbc, 0x5e, 0x63, 0x33, 0xbb, 0x27, 0x07, - 0x29, 0x33, 0xcd, 0xb8, 0xa7, 0x6c, 0xa1, 0x33, 0x98, 0x13, 0xeb, 0x0b, 0xad, 0xfa, 0x79, 0x03, - 0x2b, 0x6d, 0x26, 0xb5, 0x14, 0xbb, 0x15, 0x22, 0xf6, 0x1b, 0x88, 0x89, 0x36, 0xae, 0x84, 0xec, - 0x2d, 0xc1, 0xba, 0x3a, 0x73, 0xa3, 0xf1, 0x0e, 0x9e, 0x40, 0xa2, 0x6a, 0xe1, 0x81, 0xd3, 0xb6, - 0xe9, 0xcc, 0xb3, 0x98, 0x25, 0x2c, 0xcd, 0x85, 0x3d, 0x40, 0x29, 0x26, 0xcc, 0x91, 0x2c, 0x2f, - 0xb7, 0xbf, 0x7f, 0xda, 0xea, 0xd0, 0xf6, 0xa8, 0x9e, 0x6d, 0xd8, 0xfd, 0x5c, 0xdf, 0x76, 0x46, - 0x5d, 0x9c, 0xab, 0xf7, 0xb0, 0x43, 0x73, 0x21, 0xff, 0x8a, 0xd4, 0xe7, 0xb8, 0xf1, 0xe3, 0x7f, - 0x03, 0x00, 0x00, 0xff, 0xff, 0x28, 0xf6, 0xde, 0xbe, 0xa8, 0x0c, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ManagementClient is the client API for Management service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ManagementClient interface { - NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) - NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) - ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) - ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) - ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) - Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) - Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) -} - -type managementClient struct { - cc *grpc.ClientConn -} - -func NewManagementClient(cc *grpc.ClientConn) ManagementClient { - return &managementClient{cc} -} - -func (c *managementClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { - out := new(NodeHealthCheckResponse) - err := c.cc.Invoke(ctx, "/management.Management/NodeHealthCheck", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { - out := new(NodeInfoResponse) - err := c.cc.Invoke(ctx, "/management.Management/NodeInfo", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/ClusterJoin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/ClusterLeave", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { - out := new(ClusterInfoResponse) - err := c.cc.Invoke(ctx, "/management.Management/ClusterInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Management_ClusterWatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[0], "/management.Management/ClusterWatch", opts...) 
- if err != nil { - return nil, err - } - x := &managementClusterWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Management_ClusterWatchClient interface { - Recv() (*ClusterWatchResponse, error) - grpc.ClientStream -} - -type managementClusterWatchClient struct { - grpc.ClientStream -} - -func (x *managementClusterWatchClient) Recv() (*ClusterWatchResponse, error) { - m := new(ClusterWatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *managementClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/management.Management/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Set", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managementClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Management_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Management_serviceDesc.Streams[1], "/management.Management/Watch", opts...) 
- if err != nil { - return nil, err - } - x := &managementWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Management_WatchClient interface { - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type managementWatchClient struct { - grpc.ClientStream -} - -func (x *managementWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *managementClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/management.Management/Snapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ManagementServer is the server API for Management service. -type ManagementServer interface { - NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) - NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) - ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) - ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) - ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) - ClusterWatch(*empty.Empty, Management_ClusterWatchServer) error - Get(context.Context, *GetRequest) (*GetResponse, error) - Set(context.Context, *SetRequest) (*empty.Empty, error) - Delete(context.Context, *DeleteRequest) (*empty.Empty, error) - Watch(*WatchRequest, Management_WatchServer) error - Snapshot(context.Context, *empty.Empty) (*empty.Empty, error) -} - -// UnimplementedManagementServer can be embedded to have forward compatible implementations. 
-type UnimplementedManagementServer struct { -} - -func (*UnimplementedManagementServer) NodeHealthCheck(ctx context.Context, req *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeHealthCheck not implemented") -} -func (*UnimplementedManagementServer) NodeInfo(ctx context.Context, req *empty.Empty) (*NodeInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeInfo not implemented") -} -func (*UnimplementedManagementServer) ClusterJoin(ctx context.Context, req *ClusterJoinRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterJoin not implemented") -} -func (*UnimplementedManagementServer) ClusterLeave(ctx context.Context, req *ClusterLeaveRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterLeave not implemented") -} -func (*UnimplementedManagementServer) ClusterInfo(ctx context.Context, req *empty.Empty) (*ClusterInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") -} -func (*UnimplementedManagementServer) ClusterWatch(req *empty.Empty, srv Management_ClusterWatchServer) error { - return status.Errorf(codes.Unimplemented, "method ClusterWatch not implemented") -} -func (*UnimplementedManagementServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedManagementServer) Set(ctx context.Context, req *SetRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") -} -func (*UnimplementedManagementServer) Delete(ctx context.Context, req *DeleteRequest) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedManagementServer) Watch(req *WatchRequest, srv Management_WatchServer) error { - 
return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} -func (*UnimplementedManagementServer) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} - -func RegisterManagementServer(s *grpc.Server, srv ManagementServer) { - s.RegisterService(&_Management_serviceDesc, srv) -} - -func _Management_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeHealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).NodeHealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/NodeHealthCheck", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).NodeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/NodeInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).NodeInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterJoinRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(ManagementServer).ClusterJoin(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterJoin", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClusterLeaveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).ClusterLeave(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterLeave", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).ClusterInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/ClusterInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).ClusterInfo(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ManagementServer).ClusterWatch(m, &managementClusterWatchServer{stream}) -} - -type Management_ClusterWatchServer interface { - 
Send(*ClusterWatchResponse) error - grpc.ServerStream -} - -type managementClusterWatchServer struct { - grpc.ServerStream -} - -func (x *managementClusterWatchServer) Send(m *ClusterWatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Management_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Set(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Set", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Set(ctx, req.(*SetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ManagementServer).Delete(ctx, req.(*DeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Management_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WatchRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ManagementServer).Watch(m, &managementWatchServer{stream}) -} - -type Management_WatchServer interface { - Send(*WatchResponse) error - grpc.ServerStream -} - -type managementWatchServer struct { - grpc.ServerStream -} - -func (x *managementWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Management_Snapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagementServer).Snapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/management.Management/Snapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagementServer).Snapshot(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _Management_serviceDesc = grpc.ServiceDesc{ - ServiceName: "management.Management", - HandlerType: (*ManagementServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeHealthCheck", - Handler: _Management_NodeHealthCheck_Handler, - }, - { - MethodName: "NodeInfo", - Handler: _Management_NodeInfo_Handler, - }, - { - MethodName: "ClusterJoin", - Handler: _Management_ClusterJoin_Handler, - }, - { - MethodName: "ClusterLeave", - Handler: _Management_ClusterLeave_Handler, - }, - { - MethodName: "ClusterInfo", - Handler: _Management_ClusterInfo_Handler, - }, - { - MethodName: "Get", - Handler: _Management_Get_Handler, - }, - { - MethodName: "Set", - Handler: _Management_Set_Handler, - }, - { - MethodName: "Delete", - Handler: 
_Management_Delete_Handler, - }, - { - MethodName: "Snapshot", - Handler: _Management_Snapshot_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ClusterWatch", - Handler: _Management_ClusterWatch_Handler, - ServerStreams: true, - }, - { - StreamName: "Watch", - Handler: _Management_Watch_Handler, - ServerStreams: true, - }, - }, - Metadata: "protobuf/management/management.proto", -} diff --git a/protobuf/management/management.pb.gw.go b/protobuf/management/management.pb.gw.go deleted file mode 100644 index 5430218..0000000 --- a/protobuf/management/management.pb.gw.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: protobuf/management/management.proto - -/* -Package management is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package management - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/empty" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -var ( - filter_Management_NodeHealthCheck_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Management_NodeHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq NodeHealthCheckRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Management_NodeHealthCheck_0); err != nil { 
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.NodeHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_NodeInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.NodeInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_ClusterInfo_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.ClusterInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Get_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Set_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq SetRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Set(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["key"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") - } - - protoReq.Key, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) - } - - msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Management_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - msg, err := client.Snapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterManagementHandlerFromEndpoint is same as RegisterManagementHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterManagementHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterManagementHandler(ctx, mux, conn) -} - -// RegisterManagementHandler registers the http handlers for service Management to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterManagementHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterManagementHandlerClient(ctx, mux, NewManagementClient(conn)) -} - -// RegisterManagementHandlerClient registers the http handlers for service Management -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ManagementClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ManagementClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "ManagementClient" to call the correct interceptors. 
-func RegisterManagementHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ManagementClient) error { - - mux.Handle("GET", pattern_Management_NodeHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_NodeHealthCheck_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_NodeHealthCheck_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Management_NodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_NodeInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_NodeInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Management_ClusterInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_ClusterInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_ClusterInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Management_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Get_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("PUT", pattern_Management_Set_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Set_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Set_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("DELETE", pattern_Management_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Delete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Management_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Management_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Management_Snapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Management_NodeHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "healthcheck"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_NodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "node", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_ClusterInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "cluster", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Set_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 3, 0, 4, 1, 5, 2}, []string{"v1", "data", "key"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Management_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 
2, 1}, []string{"v1", "snapshot"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Management_NodeHealthCheck_0 = runtime.ForwardResponseMessage - - forward_Management_NodeInfo_0 = runtime.ForwardResponseMessage - - forward_Management_ClusterInfo_0 = runtime.ForwardResponseMessage - - forward_Management_Get_0 = runtime.ForwardResponseMessage - - forward_Management_Set_0 = runtime.ForwardResponseMessage - - forward_Management_Delete_0 = runtime.ForwardResponseMessage - - forward_Management_Snapshot_0 = runtime.ForwardResponseMessage -) diff --git a/protobuf/management/management.proto b/protobuf/management/management.proto deleted file mode 100644 index e3c8429..0000000 --- a/protobuf/management/management.proto +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; -import "protoc-gen-swagger/options/annotations.proto"; - -package management; - -option go_package = "github.com/mosuka/blast/protobuf/management"; - -service Management { - rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) { - option (google.api.http) = { - get: "/v1/node/healthcheck" - }; - } - rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) { - option (google.api.http) = { - get: "/v1/node/status" - }; - } - - rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} - rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} - rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) { - option (google.api.http) = { - get: "/v1/cluster/status" - }; - } - rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} - - rpc Get (GetRequest) returns (GetResponse) { - option (google.api.http) = { - get: "/v1/data/{key=**}" - }; - } - rpc Set (SetRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - put: "/v1/data/{key=**}" - body: "*" - }; - } - rpc Delete (DeleteRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/data/{key=**}" - }; - } - rpc Watch (WatchRequest) returns (stream WatchResponse) {} - rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) { - option (google.api.http) = { - get: "/v1/snapshot" - }; - } -} - -message NodeHealthCheckRequest { - enum Probe { - UNKNOWN = 0; - HEALTHINESS = 1; - LIVENESS = 2; - READINESS = 3; - } - Probe probe = 1; -} - -message NodeHealthCheckResponse { - enum State { - UNKNOWN = 0; - HEALTHY = 1; - UNHEALTHY = 2; - ALIVE = 3; - DEAD = 4; - READY = 5; - NOT_READY = 6; - } - State state = 1; -} - -message Metadata { - string grpc_address = 1; - string grpc_gateway_address = 2; - string http_address = 3; -} 
- -message Node { - enum State { - UNKNOWN = 0; - FOLLOWER = 1; - CANDIDATE = 2; - LEADER = 3; - SHUTDOWN = 4; - } - string id = 1; - string bind_address = 2; - State state = 3; - Metadata metadata = 4; -} - -message Cluster { - map nodes = 1; -} - -message NodeInfoResponse { - Node node = 1; -} - -message ClusterJoinRequest { - Node node = 1; -} - -message ClusterLeaveRequest { - string id = 1; -} - -message ClusterInfoResponse { - Cluster cluster = 1; -} - -message ClusterWatchResponse { - enum Event { - UNKNOWN = 0; - JOIN = 1; - LEAVE = 2; - UPDATE = 3; - } - Event event = 1; - Node node = 2; - Cluster cluster = 3; -} - -message KeyValue { - string key = 1; - google.protobuf.Any value = 2; -} - -message GetRequest { - string key = 1; -} - -message GetResponse { -// option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { -// json_schema: { -// required: ["value"] -// }, -// example: { -// value: '{ "fields": { "field1": "Get Example", "field2": "This is an example Get response." 
} }' -// } -// }; -// google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}]; - google.protobuf.Any value = 1; -} - -message SetRequest { - string key = 1; - google.protobuf.Any value = 2; -} - -message DeleteRequest { - string key = 1; -} - -message WatchRequest { - string key = 1; -} - -message WatchResponse { - enum Command { - UNKNOWN = 0; - SET = 1; - DELETE = 2; - } - Command command = 1; - string key = 2; - google.protobuf.Any value = 3; -} - -message Proposal { - enum Event { - UNKNOWN = 0; - SET_NODE = 1; - DELETE_NODE = 2; - SET_VALUE = 3; - DELETE_VALUE = 4; - } - Event event = 1; - Node node = 2; - KeyValue key_value = 3; -} diff --git a/protobuf/management/management.swagger.json b/protobuf/management/management.swagger.json deleted file mode 100644 index 18f1ed1..0000000 --- a/protobuf/management/management.swagger.json +++ /dev/null @@ -1,409 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "protobuf/management/management.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v1/cluster/status": { - "get": { - "operationId": "ClusterInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementClusterInfoResponse" - } - } - }, - "tags": [ - "Management" - ] - } - }, - "/v1/data/{key}": { - "get": { - "operationId": "Get", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementGetResponse" - } - } - }, - "parameters": [ - { - "name": "key", - "in": "path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Management" - ] - }, - "delete": { - "operationId": "Delete", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "key", - "in": 
"path", - "required": true, - "type": "string" - } - ], - "tags": [ - "Management" - ] - }, - "put": { - "operationId": "Set", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "parameters": [ - { - "name": "key", - "in": "path", - "required": true, - "type": "string" - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/managementSetRequest" - } - } - ], - "tags": [ - "Management" - ] - } - }, - "/v1/node/healthcheck": { - "get": { - "operationId": "NodeHealthCheck", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementNodeHealthCheckResponse" - } - } - }, - "parameters": [ - { - "name": "probe", - "in": "query", - "required": false, - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - } - ], - "tags": [ - "Management" - ] - } - }, - "/v1/node/status": { - "get": { - "operationId": "NodeInfo", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/managementNodeInfoResponse" - } - } - }, - "tags": [ - "Management" - ] - } - }, - "/v1/snapshot": { - "get": { - "operationId": "Snapshot", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "properties": {} - } - } - }, - "tags": [ - "Management" - ] - } - } - }, - "definitions": { - "NodeHealthCheckRequestProbe": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHINESS", - "LIVENESS", - "READINESS" - ], - "default": "UNKNOWN" - }, - "WatchResponseCommand": { - "type": "string", - "enum": [ - "UNKNOWN", - "SET", - "DELETE" - ], - "default": "UNKNOWN" - }, - "managementCluster": { - "type": "object", - "properties": { - "nodes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/managementNode" - } - } - } - }, - "managementClusterInfoResponse": { - 
"type": "object", - "properties": { - "cluster": { - "$ref": "#/definitions/managementCluster" - } - } - }, - "managementClusterWatchResponse": { - "type": "object", - "properties": { - "event": { - "$ref": "#/definitions/managementClusterWatchResponseEvent" - }, - "node": { - "$ref": "#/definitions/managementNode" - }, - "cluster": { - "$ref": "#/definitions/managementCluster" - } - } - }, - "managementClusterWatchResponseEvent": { - "type": "string", - "enum": [ - "UNKNOWN", - "JOIN", - "LEAVE", - "UPDATE" - ], - "default": "UNKNOWN" - }, - "managementGetResponse": { - "type": "object", - "properties": { - "value": { - "$ref": "#/definitions/protobufAny", - "title": "option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = {\n json_schema: {\n required: [\"value\"]\n },\n example: {\n value: '{ \"fields\": { \"field1\": \"Get Example\", \"field2\": \"This is an example Get response.\" } }'\n }\n };\n google.protobuf.Any value = 1 [(grpc.gateway.protoc_gen_swagger.options.openapiv2_field) = {type: 6}];" - } - } - }, - "managementMetadata": { - "type": "object", - "properties": { - "grpc_address": { - "type": "string" - }, - "grpc_gateway_address": { - "type": "string" - }, - "http_address": { - "type": "string" - } - } - }, - "managementNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "bind_address": { - "type": "string" - }, - "state": { - "$ref": "#/definitions/managementNodeState" - }, - "metadata": { - "$ref": "#/definitions/managementMetadata" - } - } - }, - "managementNodeHealthCheckResponse": { - "type": "object", - "properties": { - "state": { - "$ref": "#/definitions/managementNodeHealthCheckResponseState" - } - } - }, - "managementNodeHealthCheckResponseState": { - "type": "string", - "enum": [ - "UNKNOWN", - "HEALTHY", - "UNHEALTHY", - "ALIVE", - "DEAD", - "READY", - "NOT_READY" - ], - "default": "UNKNOWN" - }, - "managementNodeInfoResponse": { - "type": "object", - "properties": { - "node": { - "$ref": 
"#/definitions/managementNode" - } - } - }, - "managementNodeState": { - "type": "string", - "enum": [ - "UNKNOWN", - "FOLLOWER", - "CANDIDATE", - "LEADER", - "SHUTDOWN" - ], - "default": "UNKNOWN" - }, - "managementSetRequest": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "managementWatchResponse": { - "type": "object", - "properties": { - "command": { - "$ref": "#/definitions/WatchResponseCommand" - }, - "key": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/protobufAny" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string", - "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. 
(Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." - }, - "value": { - "type": "string", - "format": "byte", - "description": "Must be a valid serialized protocol buffer of the above specified type." - } - }, - "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - } - }, - "x-stream-definitions": { - "managementClusterWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/managementClusterWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of managementClusterWatchResponse" - }, - "managementWatchResponse": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/managementWatchResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of managementWatchResponse" - } - } -} diff --git a/protobuf/util.go b/protobuf/util.go deleted file mode 100644 index d3a6ca5..0000000 --- a/protobuf/util.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package protobuf - -import ( - "encoding/json" - - "github.com/golang/protobuf/ptypes/any" - "github.com/mosuka/blast/registry" -) - -func MarshalAny(message *any.Any) (interface{}, error) { - if message == nil { - return nil, nil - } - - typeUrl := message.TypeUrl - value := message.Value - - instance := registry.TypeInstanceByName(typeUrl) - - err := json.Unmarshal(value, instance) - if err != nil { - return nil, err - } - - return instance, nil -} - -func UnmarshalAny(instance interface{}, message *any.Any) error { - var err error - - if instance == nil { - return nil - } - - message.TypeUrl = registry.TypeNameByInstance(instance) - - message.Value, err = json.Marshal(instance) - if err != nil { - return err - } - - return nil -} diff --git a/protobuf/util_test.go b/protobuf/util_test.go deleted file mode 100644 index 9523b51..0000000 --- a/protobuf/util_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package protobuf - -import ( - "bytes" - "testing" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/search/query" - "github.com/golang/protobuf/ptypes/any" -) - -func TestMarshalAny_Slice(t *testing.T) { - data := []interface{}{"a", 1} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "[]interface {}" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`["a",1]`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestMarshalAny_Map(t *testing.T) { - data := map[string]interface{}{"a": 1, "b": 2, "c": 3} - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedMapType := "map[string]interface {}" - actualMapType := dataAny.TypeUrl - if expectedMapType != actualMapType { - t.Fatalf("expected content to see %s, saw %s", expectedMapType, actualMapType) - } - - expectedValue := []byte(`{"a":1,"b":2,"c":3}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -//func TestMarshalAny_Document(t *testing.T) { -// fieldsMap := map[string]interface{}{"f1": "aaa", "f2": 222, "f3": "ccc"} -// fieldsAny := &any.Any{} -// err := UnmarshalAny(fieldsMap, fieldsAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// data := &index.Document{ -// Id: "1", -// Fields: fieldsAny, -// } -// -// dataAny := &any.Any{} -// err = UnmarshalAny(data, dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// expectedType := "index.Document" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Fatalf("expected content to see %s, saw %s", expectedType, 
actualType) -// } -// -// expectedValue := []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -//func TestMarshalAny_Node(t *testing.T) { -// data := &raft.Node{ -// Id: "node1", -// Metadata: &raft.Metadata{ -// GrpcAddr: ":5050", -// DataDir: "/tmp/blast/index1", -// BindAddr: ":6060", -// HttpAddr: ":8080", -// Leader: true, -// }, -// } -// -// dataAny := &any.Any{} -// err := UnmarshalAny(data, dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// expectedType := "raft.Node" -// actualType := dataAny.TypeUrl -// if expectedType != actualType { -// t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) -// } -// -// expectedValue := []byte(`{"id":"node1","metadata":{"bind_addr":":6060","grpc_addr":":5050","http_addr":":8080","data_dir":"/tmp/blast/index1","leader":true}}`) -// actualValue := dataAny.Value -// if !bytes.Equal(expectedValue, actualValue) { -// t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) -// } -//} - -func TestMarshalAny_SearchRequest(t *testing.T) { - data := bleve.NewSearchRequest(bleve.NewQueryStringQuery("blast")) - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "bleve.SearchRequest" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false,"search_after":null,"search_before":null}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, 
saw %v", expectedValue, actualValue) - } -} - -func TestMarshalAny_SearchResult(t *testing.T) { - data := &bleve.SearchResult{ - Total: 10, - } - - dataAny := &any.Any{} - err := UnmarshalAny(data, dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - expectedType := "bleve.SearchResult" - actualType := dataAny.TypeUrl - if expectedType != actualType { - t.Fatalf("expected content to see %s, saw %s", expectedType, actualType) - } - - expectedValue := []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`) - actualValue := dataAny.Value - if !bytes.Equal(expectedValue, actualValue) { - t.Fatalf("expected content to see %v, saw %v", expectedValue, actualValue) - } -} - -func TestUnmarshalAny_Slice(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "[]interface {}", - Value: []byte(`["a",1]`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*[]interface{}) - - expected1 := "a" - actual1 := data[0] - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(1) - actual2 := data[1] - if expected2 != actual2 { - t.Fatalf("expected content to see %v, saw %v", expected2, actual2) - } -} - -func TestUnmarshalAny_Map(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "map[string]interface {}", - Value: []byte(`{"a":1,"b":2,"c":3}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*map[string]interface{}) - - expected1 := float64(1) - actual1 := data["a"] - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } - - expected2 := float64(2) - actual2 := data["b"] - if expected2 != actual2 { - t.Fatalf("expected content to see %v, saw %v", expected2, actual2) - } - - expected3 := float64(3) - actual3 := data["c"] - if expected3 != actual3 { - t.Fatalf("expected content to see %v, saw %v", expected3, actual3) - } -} 
- -//func TestUnmarshalAny_Document(t *testing.T) { -// dataAny := &any.Any{ -// TypeUrl: "index.Document", -// Value: []byte(`{"id":"1","fields":{"type_url":"map[string]interface {}","value":"eyJmMSI6ImFhYSIsImYyIjoyMjIsImYzIjoiY2NjIn0="}}`), -// } -// -// ins, err := MarshalAny(dataAny) -// if err != nil { -// t.Fatalf("%v", err) -// } -// -// data := *ins.(*index.Document) -// -// expected1 := "1" -// actual1 := data.Id -// if expected1 != actual1 { -// t.Fatalf("expected content to see %v, saw %v", expected1, actual1) -// } -// -// expected2 := "map[string]interface {}" -// actual2 := data.Fields.TypeUrl -// if expected2 != actual2 { -// t.Fatalf("expected content to see %v, saw %v", expected2, actual2) -// } -// -// expected3 := []byte(`{"f1":"aaa","f2":222,"f3":"ccc"}`) -// actual3 := data.Fields.Value -// if !bytes.Equal(expected3, actual3) { -// t.Fatalf("expected content to see %v, saw %v", expected3, actual3) -// } -//} - -func TestUnmarshalAny_SearchRequest(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchRequest", - Value: []byte(`{"query":{"query":"blast"},"size":10,"from":0,"highlight":null,"fields":null,"facets":null,"explain":false,"sort":["-_score"],"includeLocations":false}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*bleve.SearchRequest) - - expected1 := bleve.NewQueryStringQuery("blast").Query - actual1 := data.Query.(*query.QueryStringQuery).Query - if expected1 != actual1 { - t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } -} - -func TestUnmarshalAny_SearchResult(t *testing.T) { - dataAny := &any.Any{ - TypeUrl: "bleve.SearchResult", - Value: []byte(`{"status":null,"request":null,"hits":null,"total_hits":10,"max_score":0,"took":0,"facets":null}`), - } - - ins, err := MarshalAny(dataAny) - if err != nil { - t.Fatalf("%v", err) - } - - data := *ins.(*bleve.SearchResult) - - expected1 := uint64(10) - actual1 := data.Total - if expected1 != actual1 { 
- t.Fatalf("expected content to see %v, saw %v", expected1, actual1) - } -} diff --git a/registry/type.go b/registry/type.go index 5cb1206..7dc13b0 100644 --- a/registry/type.go +++ b/registry/type.go @@ -1,57 +1,11 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package registry import ( "errors" "fmt" "reflect" - - "github.com/blevesearch/bleve" - "github.com/blevesearch/bleve/mapping" ) -func init() { - RegisterType("bool", reflect.TypeOf(false)) - RegisterType("string", reflect.TypeOf("")) - RegisterType("int", reflect.TypeOf(int(0))) - RegisterType("int8", reflect.TypeOf(int8(0))) - RegisterType("int16", reflect.TypeOf(int16(0))) - RegisterType("int32", reflect.TypeOf(int32(0))) - RegisterType("int64", reflect.TypeOf(int64(0))) - RegisterType("uint", reflect.TypeOf(uint(0))) - RegisterType("uint8", reflect.TypeOf(uint8(0))) - RegisterType("uint16", reflect.TypeOf(uint16(0))) - RegisterType("uint32", reflect.TypeOf(uint32(0))) - RegisterType("uint64", reflect.TypeOf(uint64(0))) - RegisterType("uintptr", reflect.TypeOf(uintptr(0))) - RegisterType("byte", reflect.TypeOf(byte(0))) - RegisterType("rune", reflect.TypeOf(rune(0))) - RegisterType("float32", reflect.TypeOf(float32(0))) - RegisterType("float64", reflect.TypeOf(float64(0))) - RegisterType("complex64", reflect.TypeOf(complex64(0))) - RegisterType("complex128", reflect.TypeOf(complex128(0))) - - RegisterType("map[string]interface {}", 
reflect.TypeOf((map[string]interface{})(nil))) - RegisterType("[]interface {}", reflect.TypeOf(([]interface{})(nil))) - - RegisterType("mapping.IndexMappingImpl", reflect.TypeOf(mapping.IndexMappingImpl{})) - RegisterType("bleve.SearchRequest", reflect.TypeOf(bleve.SearchRequest{})) - RegisterType("bleve.SearchResult", reflect.TypeOf(bleve.SearchResult{})) -} - type TypeRegistry map[string]reflect.Type var Types = make(TypeRegistry, 0) @@ -68,13 +22,11 @@ func TypeByName(name string) reflect.Type { } func TypeNameByInstance(instance interface{}) string { - switch instance.(type) { - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128: - return reflect.TypeOf(instance).Name() - case map[string]interface{}, []interface{}: - return reflect.TypeOf(instance).String() + switch ins := instance.(type) { + case map[string]interface{}: + return reflect.TypeOf(ins).String() default: - return reflect.TypeOf(instance).Elem().String() + return reflect.TypeOf(ins).Elem().String() } } diff --git a/server/grpc_gateway.go b/server/grpc_gateway.go new file mode 100644 index 0000000..c319fc0 --- /dev/null +++ b/server/grpc_gateway.go @@ -0,0 +1,129 @@ +package server + +import ( + "context" + "math" + "net" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +func responseFilter(ctx context.Context, w http.ResponseWriter, resp proto.Message) error { + switch resp.(type) { + case *protobuf.GetResponse: + w.Header().Set("Content-Type", "application/json") + case *protobuf.MetricsResponse: + w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8") + default: + w.Header().Set("Content-Type", marshaler.DefaultContentType) + } + + 
return nil +} + +type GRPCGateway struct { + httpAddress string + grpcAddress string + + cancel context.CancelFunc + listener net.Listener + mux *runtime.ServeMux + + certificateFile string + keyFile string + + logger *zap.Logger +} + +func NewGRPCGateway(httpAddress string, grpcAddress string, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCGateway, error) { + dialOpts := []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(math.MaxInt64), + grpc.MaxCallRecvMsgSize(math.MaxInt64), + ), + grpc.WithKeepaliveParams( + keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 5 * time.Second, + PermitWithoutStream: true, + }, + ), + } + + baseCtx := context.TODO() + ctx, cancel := context.WithCancel(baseCtx) + + mux := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, new(marshaler.BlastMarshaler)), + runtime.WithForwardResponseOption(responseFilter), + ) + + if certificateFile == "" { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } else { + creds, err := credentials.NewClientTLSFromFile(certificateFile, commonName) + if err != nil { + return nil, err + } + dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) + } + + err := protobuf.RegisterIndexHandlerFromEndpoint(ctx, mux, grpcAddress, dialOpts) + if err != nil { + logger.Error("failed to register KVS handler from endpoint", zap.Error(err)) + return nil, err + } + + listener, err := net.Listen("tcp", httpAddress) + if err != nil { + logger.Error("failed to create index service", zap.Error(err)) + return nil, err + } + + return &GRPCGateway{ + httpAddress: httpAddress, + grpcAddress: grpcAddress, + listener: listener, + mux: mux, + cancel: cancel, + certificateFile: certificateFile, + keyFile: keyFile, + logger: logger, + }, nil +} + +func (s *GRPCGateway) Start() error { + if s.certificateFile == "" && s.keyFile == "" { + go func() { + _ = http.Serve(s.listener, s.mux) + }() + } else { + go func() { + _ = 
http.ServeTLS(s.listener, s.mux, s.certificateFile, s.keyFile) + }() + } + + s.logger.Info("gRPC gateway started", zap.String("http_address", s.httpAddress)) + return nil +} + +func (s *GRPCGateway) Stop() error { + defer s.cancel() + + err := s.listener.Close() + if err != nil { + s.logger.Error("failed to close listener", zap.String("http_address", s.listener.Addr().String()), zap.Error(err)) + } + + s.logger.Info("gRPC gateway stopped", zap.String("http_address", s.httpAddress)) + return nil +} diff --git a/server/grpc_server.go b/server/grpc_server.go new file mode 100644 index 0000000..d01f5a3 --- /dev/null +++ b/server/grpc_server.go @@ -0,0 +1,129 @@ +package server + +import ( + "math" + "net" + "time" + + grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpczap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" +) + +type GRPCServer struct { + grpcAddress string + service *GRPCService + server *grpc.Server + listener net.Listener + + certificateFile string + keyFile string + commonName string + + logger *zap.Logger +} + +func NewGRPCServer(grpcAddress string, raftServer *RaftServer, certificateFile string, keyFile string, commonName string, logger *zap.Logger) (*GRPCServer, error) { + grpcLogger := logger.Named("grpc") + + opts := []grpc.ServerOption{ + grpc.MaxRecvMsgSize(math.MaxInt64), + grpc.MaxSendMsgSize(math.MaxInt64), + grpc.StreamInterceptor( + grpcmiddleware.ChainStreamServer( + metric.GrpcMetrics.StreamServerInterceptor(), + grpczap.StreamServerInterceptor(grpcLogger), + ), + ), + grpc.UnaryInterceptor( + grpcmiddleware.ChainUnaryServer( + metric.GrpcMetrics.UnaryServerInterceptor(), + grpczap.UnaryServerInterceptor(grpcLogger), + ), + ), + grpc.KeepaliveParams( + 
keepalive.ServerParameters{ + //MaxConnectionIdle: 0, + //MaxConnectionAge: 0, + //MaxConnectionAgeGrace: 0, + Time: 5 * time.Second, + Timeout: 5 * time.Second, + }, + ), + } + + if certificateFile == "" && keyFile == "" { + logger.Info("disabling TLS") + } else { + logger.Info("enabling TLS") + creds, err := credentials.NewServerTLSFromFile(certificateFile, keyFile) + if err != nil { + logger.Error("failed to create credentials", zap.Error(err)) + } + opts = append(opts, grpc.Creds(creds)) + } + + server := grpc.NewServer( + opts..., + ) + + service, err := NewGRPCService(raftServer, certificateFile, commonName, logger) + if err != nil { + logger.Error("failed to create key value store service", zap.Error(err)) + return nil, err + } + + protobuf.RegisterIndexServer(server, service) + + // Initialize all metrics. + metric.GrpcMetrics.InitializeMetrics(server) + grpc_prometheus.Register(server) + + listener, err := net.Listen("tcp", grpcAddress) + if err != nil { + logger.Error("failed to create listener", zap.String("grpc_address", grpcAddress), zap.Error(err)) + return nil, err + } + + return &GRPCServer{ + grpcAddress: grpcAddress, + service: service, + server: server, + listener: listener, + certificateFile: certificateFile, + keyFile: keyFile, + commonName: commonName, + logger: logger, + }, nil +} + +func (s *GRPCServer) Start() error { + if err := s.service.Start(); err != nil { + s.logger.Error("failed to start service", zap.Error(err)) + } + + go func() { + _ = s.server.Serve(s.listener) + }() + + s.logger.Info("gRPC server started", zap.String("grpc_address", s.grpcAddress)) + return nil +} + +func (s *GRPCServer) Stop() error { + if err := s.service.Stop(); err != nil { + s.logger.Error("failed to stop service", zap.Error(err)) + } + + //s.server.GracefulStop() + s.server.Stop() + + s.logger.Info("gRPC server stopped", zap.String("grpc_address", s.grpcAddress)) + return nil +} diff --git a/server/grpc_service.go b/server/grpc_service.go new file mode 
100644 index 0000000..2d0843a --- /dev/null +++ b/server/grpc_service.go @@ -0,0 +1,540 @@ +package server + +import ( + "bytes" + "context" + "encoding/json" + "sync" + "time" + + "github.com/blevesearch/bleve" + "github.com/golang/protobuf/ptypes/empty" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/client" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "github.com/prometheus/common/expfmt" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type GRPCService struct { + raftServer *RaftServer + certificateFile string + commonName string + logger *zap.Logger + + watchMutex sync.RWMutex + watchChans map[chan protobuf.WatchResponse]struct{} + + peerClients map[string]*client.GRPCClient + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} +} + +func NewGRPCService(raftServer *RaftServer, certificateFile string, commonName string, logger *zap.Logger) (*GRPCService, error) { + return &GRPCService{ + raftServer: raftServer, + certificateFile: certificateFile, + commonName: commonName, + logger: logger, + + watchChans: make(map[chan protobuf.WatchResponse]struct{}), + + peerClients: make(map[string]*client.GRPCClient, 0), + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + }, nil +} + +func (s *GRPCService) Start() error { + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("gRPC service started") + return nil +} + +func (s *GRPCService) Stop() error { + s.stopWatchCluster() + + s.logger.Info("gRPC service stopped") + return nil +} + +func (s *GRPCService) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + if 
err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case event := <-s.raftServer.applyCh: + watchResp := &protobuf.WatchResponse{ + Event: event, + } + for c := range s.watchChans { + c <- *watchResp + } + case <-ticker.C: + s.watchMutex.Lock() + + // open clients for peer nodes + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Warn("failed to get cluster info", zap.String("err", err.Error())) + } + for id, node := range nodes { + if id == s.raftServer.id { + continue + } + + if node.Metadata == nil || node.Metadata.GrpcAddress == "" { + s.logger.Debug("gRPC address missing", zap.String("id", id)) + continue + } + if c, ok := s.peerClients[id]; ok { + if c.Target() != node.Metadata.GrpcAddress { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress)) + if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil { + s.peerClients[id] = newClient + } else { + s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } + } else { + s.logger.Debug("create client", zap.String("id", id), zap.String("grpc_address", node.Metadata.GrpcAddress)) + if newClient, err := client.NewGRPCClientWithContextTLS(node.Metadata.GrpcAddress, context.TODO(), s.certificateFile, s.commonName); err == nil { + 
s.peerClients[id] = newClient + } else { + s.logger.Warn("failed to create client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } + } + + // close clients for non-existent peer nodes + for id, c := range s.peerClients { + if _, exist := nodes[id]; !exist { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close old client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } + } + + s.watchMutex.Unlock() + } + } +} + +func (s *GRPCService) stopWatchCluster() { + if s.watchClusterStopCh != nil { + s.logger.Info("send a request to stop updating a cluster") + close(s.watchClusterStopCh) + } + + s.logger.Info("wait for the cluster watching to stop") + <-s.watchClusterDoneCh + s.logger.Info("the cluster watching has been stopped") + + s.logger.Info("close all peer clients") + for id, c := range s.peerClients { + s.logger.Debug("close client", zap.String("id", id), zap.String("grpc_address", c.Target())) + delete(s.peerClients, id) + if err := c.Close(); err != nil { + s.logger.Warn("failed to close client", zap.String("id", id), zap.String("grpc_address", c.Target()), zap.Error(err)) + } + } +} + +func (s *GRPCService) LivenessCheck(ctx context.Context, req *empty.Empty) (*protobuf.LivenessCheckResponse, error) { + resp := &protobuf.LivenessCheckResponse{} + + resp.Alive = true + + return resp, nil +} + +func (s *GRPCService) ReadinessCheck(ctx context.Context, req *empty.Empty) (*protobuf.ReadinessCheckResponse, error) { + resp := &protobuf.ReadinessCheckResponse{} + + timeout := 10 * time.Second + if err := s.raftServer.WaitForDetectLeader(timeout); err != nil { + s.logger.Error("missing leader node", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + if s.raftServer.State() == raft.Candidate || s.raftServer.State() == raft.Shutdown 
{ + err := errors.ErrNodeNotReady + s.logger.Error(err.Error(), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Ready = true + + return resp, nil +} + +func (s *GRPCService) Join(ctx context.Context, req *protobuf.JoinRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Join(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Join(req.Id, req.Node) + if err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + s.logger.Debug("node already exists", zap.Any("req", req), zap.Error(err)) + default: + s.logger.Error("failed to join node to the cluster", zap.String("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + } + + return resp, nil +} + +func (s *GRPCService) Leave(ctx context.Context, req *protobuf.LeaveRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + err = c.Leave(req) + if err != nil { + s.logger.Error("failed to forward request", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + err := s.raftServer.Leave(req.Id) + if err != nil { + s.logger.Error("failed to leave node from the cluster", 
zap.Any("req", req), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Node(ctx context.Context, req *empty.Empty) (*protobuf.NodeResponse, error) { + resp := &protobuf.NodeResponse{} + + node, err := s.raftServer.Node() + if err != nil { + s.logger.Error("failed to get node info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Node = node + + return resp, nil +} + +func (s *GRPCService) Cluster(ctx context.Context, req *empty.Empty) (*protobuf.ClusterResponse, error) { + resp := &protobuf.ClusterResponse{} + + cluster := &protobuf.Cluster{} + + nodes, err := s.raftServer.Nodes() + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + for id, node := range nodes { + if id == s.raftServer.id { + node.State = s.raftServer.StateStr() + } else { + c := s.peerClients[id] + nodeResp, err := c.Node() + if err != nil { + node.State = raft.Shutdown.String() + s.logger.Error("failed to get node info", zap.String("grpc_address", node.Metadata.GrpcAddress), zap.String("err", err.Error())) + } else { + node.State = nodeResp.Node.State + } + } + } + cluster.Nodes = nodes + + serverID, err := s.raftServer.LeaderID(60 * time.Second) + if err != nil { + s.logger.Error("failed to get cluster info", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + cluster.Leader = string(serverID) + + resp.Cluster = cluster + + return resp, nil +} + +func (s *GRPCService) Snapshot(ctx context.Context, req *empty.Empty) (*empty.Empty, error) { + resp := &empty.Empty{} + + err := s.raftServer.Snapshot() + if err != nil { + s.logger.Error("failed to snapshot data", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Get(ctx 
context.Context, req *protobuf.GetRequest) (*protobuf.GetResponse, error) { + resp := &protobuf.GetResponse{} + + fields, err := s.raftServer.Get(req.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + s.logger.Debug("document not found", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.NotFound, err.Error()) + default: + s.logger.Error("failed to get document", zap.String("id", req.Id), zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + s.logger.Error("failed to marshal fields map to bytes", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.Fields = fieldsBytes + + return resp, nil +} + +func (s *GRPCService) Set(ctx context.Context, req *protobuf.SetRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Set(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Set(req); err != nil { + s.logger.Error("failed to index document", zap.Any("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Delete(ctx context.Context, req *protobuf.DeleteRequest) (*empty.Empty, error) { + resp := &empty.Empty{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", 
zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + if err = c.Delete(req); err != nil { + s.logger.Error("failed to forward request to leader", zap.String("grpc_address", c.Target()), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil + } + + if err := s.raftServer.Delete(req); err != nil { + s.logger.Error("failed to delete document", zap.String("id", req.Id), zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkIndex(ctx context.Context, req *protobuf.BulkIndexRequest) (*protobuf.BulkIndexResponse, error) { + resp := &protobuf.BulkIndexResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkIndex(req) + } + + if err := s.raftServer.BulkIndex(req); err != nil { + s.logger.Error("failed to index documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) BulkDelete(ctx context.Context, req *protobuf.BulkDeleteRequest) (*protobuf.BulkDeleteResponse, error) { + resp := &protobuf.BulkDeleteResponse{} + + if s.raftServer.raft.State() != raft.Leader { + clusterResp, err := s.Cluster(ctx, &empty.Empty{}) + if err != nil { + s.logger.Error("failed to get cluster info", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + c := s.peerClients[clusterResp.Cluster.Leader] + return c.BulkDelete(req) + } + + if err := s.raftServer.BulkDelete(req); err != nil { + s.logger.Error("failed to delete documents in bulk", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, 
nil +} + +func (s *GRPCService) Search(ctx context.Context, req *protobuf.SearchRequest) (*protobuf.SearchResponse, error) { + resp := &protobuf.SearchResponse{} + + searchRequest := &bleve.SearchRequest{} + if err := json.Unmarshal(req.SearchRequest, searchRequest); err != nil { + s.logger.Error("failed to unmarshal bytes to search request", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResult, err := s.raftServer.Search(searchRequest) + if err != nil { + s.logger.Error("failed to search documents", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + searchResultBytes, err := json.Marshal(searchResult) + if err != nil { + s.logger.Error("failed to marshal search result to bytes", zap.Error(err)) + return resp, status.Error(codes.Internal, err.Error()) + } + + resp.SearchResult = searchResultBytes + + return resp, nil +} + +func (s *GRPCService) Mapping(ctx context.Context, req *empty.Empty) (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + var err error + + resp, err = s.raftServer.Mapping() + if err != nil { + s.logger.Error("failed to get document", zap.String("err", err.Error())) + return resp, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +func (s *GRPCService) Watch(req *empty.Empty, server protobuf.Index_WatchServer) error { + chans := make(chan protobuf.WatchResponse) + + s.watchMutex.Lock() + s.watchChans[chans] = struct{}{} + s.watchMutex.Unlock() + + defer func() { + s.watchMutex.Lock() + delete(s.watchChans, chans) + s.watchMutex.Unlock() + close(chans) + }() + + for resp := range chans { + if err := server.Send(&resp); err != nil { + s.logger.Error("failed to send watch data", zap.String("event", resp.Event.String()), zap.Error(err)) + return status.Error(codes.Internal, err.Error()) + } + } + + return nil +} + +func (s *GRPCService) Metrics(ctx context.Context, req *empty.Empty) (*protobuf.MetricsResponse, error) { + resp := 
&protobuf.MetricsResponse{} + + var err error + + gather, err := metric.Registry.Gather() + if err != nil { + s.logger.Error("failed to get gather", zap.Error(err)) + } + out := &bytes.Buffer{} + for _, mf := range gather { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + s.logger.Error("failed to parse metric family", zap.Error(err)) + } + } + + resp.Metrics = out.Bytes() + + return resp, nil +} diff --git a/server/raft_fsm.go b/server/raft_fsm.go new file mode 100644 index 0000000..2f69a94 --- /dev/null +++ b/server/raft_fsm.go @@ -0,0 +1,400 @@ +package server + +import ( + "encoding/json" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/storage" + "go.uber.org/zap" +) + +type ApplyResponse struct { + count int + error error +} + +type RaftFSM struct { + logger *zap.Logger + + index *storage.Index + metadata map[string]*protobuf.Metadata + nodesMutex sync.RWMutex + + applyCh chan *protobuf.Event +} + +func NewRaftFSM(path string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*RaftFSM, error) { + index, err := storage.NewIndex(path, indexMapping, logger) + if err != nil { + logger.Error("failed to create index store", zap.String("path", path), zap.Error(err)) + return nil, err + } + + return &RaftFSM{ + logger: logger, + index: index, + metadata: make(map[string]*protobuf.Metadata, 0), + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (f *RaftFSM) Close() error { + f.applyCh <- nil + f.logger.Info("apply channel has closed") + + if err := f.index.Close(); err != nil { + f.logger.Error("failed to close index store", zap.Error(err)) + return err + } + + f.logger.Info("Index has closed") + + return nil +} + +func (f *RaftFSM) get(id string) 
(map[string]interface{}, error) { + return f.index.Get(id) +} + +func (f *RaftFSM) search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + return f.index.Search(searchRequest) +} + +func (f *RaftFSM) set(id string, fields map[string]interface{}) error { + return f.index.Index(id, fields) +} + +func (f *RaftFSM) delete(id string) error { + return f.index.Delete(id) +} + +func (f *RaftFSM) bulkIndex(docs []map[string]interface{}) (int, error) { + return f.index.BulkIndex(docs) +} + +func (f *RaftFSM) bulkDelete(ids []string) (int, error) { + return f.index.BulkDelete(ids) +} + +func (f *RaftFSM) getMetadata(id string) *protobuf.Metadata { + if metadata, exists := f.metadata[id]; exists { + return metadata + } else { + f.logger.Debug("metadata not found", zap.String("id", id)) + return nil + } +} + +func (f *RaftFSM) setMetadata(id string, metadata *protobuf.Metadata) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + f.metadata[id] = metadata + + return nil +} + +func (f *RaftFSM) deleteMetadata(id string) error { + f.nodesMutex.Lock() + defer f.nodesMutex.Unlock() + + if _, exists := f.metadata[id]; exists { + delete(f.metadata, id) + } + + return nil +} + +func (f *RaftFSM) Apply(l *raft.Log) interface{} { + var event protobuf.Event + err := proto.Unmarshal(l.Data, &event) + if err != nil { + f.logger.Error("failed to unmarshal message bytes to KVS command", zap.Error(err)) + return err + } + + switch event.Type { + case protobuf.Event_Join: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := data.(*protobuf.SetMetadataRequest) + + if err := f.setMetadata(req.Id, 
req.Metadata); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Leave: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal to request from KVS command request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteMetadataRequest) + + if err := f.deleteMetadata(req.Id); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Set: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.SetRequest) + + var fields map[string]interface{} + if err := json.Unmarshal(req.Fields, &fields); err != nil { + return &ApplyResponse{error: err} + } + + if err := f.set(req.Id, fields); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_Delete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to delete request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } + req := *data.(*protobuf.DeleteRequest) + + if err := 
f.delete(req.Id); err != nil { + return &ApplyResponse{error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{} + case protobuf.Event_BulkIndex: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + req := *data.(*protobuf.BulkIndexRequest) + + docs := make([]map[string]interface{}, 0) + for _, r := range req.Requests { + var fields map[string]interface{} + if err := json.Unmarshal(r.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal bytes to map", zap.String("id", r.Id), zap.Error(err)) + continue + } + + doc := map[string]interface{}{ + "id": r.Id, + "fields": fields, + } + docs = append(docs, doc) + } + + count, err := f.bulkIndex(docs) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + case protobuf.Event_BulkDelete: + data, err := marshaler.MarshalAny(event.Data) + if err != nil { + f.logger.Error("failed to marshal event data to set request", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + if data == nil { + err = errors.ErrNil + f.logger.Error("request is nil", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{count: -1, error: nil} + } + req := *data.(*protobuf.BulkDeleteRequest) + + ids := make([]string, 0) + for _, r := range req.Requests { + ids = append(ids, r.Id) + } + + count, err := f.bulkDelete(ids) + if err != nil { + return &ApplyResponse{count: count, error: err} + } + + f.applyCh <- &event + + return &ApplyResponse{count: count, error: nil} + default: + err = 
errors.ErrUnsupportedEvent + f.logger.Error("unsupported command", zap.String("type", event.Type.String()), zap.Error(err)) + return &ApplyResponse{error: err} + } +} + +func (f *RaftFSM) Stats() map[string]interface{} { + return f.index.Stats() +} + +func (f *RaftFSM) Mapping() *mapping.IndexMappingImpl { + return f.index.Mapping() +} + +func (f *RaftFSM) Snapshot() (raft.FSMSnapshot, error) { + return &KVSFSMSnapshot{ + index: f.index, + logger: f.logger, + }, nil +} + +func (f *RaftFSM) Restore(rc io.ReadCloser) error { + start := time.Now() + + f.logger.Info("start to restore items") + + defer func() { + err := rc.Close() + if err != nil { + f.logger.Error("failed to close reader", zap.Error(err)) + } + }() + + data, err := ioutil.ReadAll(rc) + if err != nil { + f.logger.Error("failed to open reader", zap.Error(err)) + return err + } + + count := uint64(0) + + buff := proto.NewBuffer(data) + for { + doc := &protobuf.Document{} + err = buff.DecodeMessage(doc) + if err == io.ErrUnexpectedEOF { + f.logger.Debug("reached the EOF", zap.Error(err)) + break + } + if err != nil { + f.logger.Error("failed to read document", zap.Error(err)) + return err + } + + var fields map[string]interface{} + if err := json.Unmarshal(doc.Fields, &fields); err != nil { + f.logger.Error("failed to unmarshal fields bytes to map", zap.Error(err)) + continue + } + + // apply item to store + if err = f.index.Index(doc.Id, fields); err != nil { + f.logger.Error("failed to index document", zap.Error(err)) + continue + } + + f.logger.Debug("document restored", zap.String("id", doc.Id)) + count = count + 1 + } + + f.logger.Info("finished to restore items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +// --------------------- + +type KVSFSMSnapshot struct { + index *storage.Index + logger *zap.Logger +} + +func (f *KVSFSMSnapshot) Persist(sink raft.SnapshotSink) error { + start := time.Now() + + f.logger.Info("start to 
persist items") + + defer func() { + if err := sink.Close(); err != nil { + f.logger.Error("failed to close sink", zap.Error(err)) + } + }() + + ch := f.index.SnapshotItems() + + count := uint64(0) + + for { + doc := <-ch + if doc == nil { + f.logger.Debug("channel closed") + break + } + + count = count + 1 + + buff := proto.NewBuffer([]byte{}) + if err := buff.EncodeMessage(doc); err != nil { + f.logger.Error("failed to encode document", zap.Error(err)) + return err + } + + if _, err := sink.Write(buff.Bytes()); err != nil { + f.logger.Error("failed to write document", zap.Error(err)) + return err + } + } + + f.logger.Info("finished to persist items", zap.Uint64("count", count), zap.Float64("time", float64(time.Since(start))/float64(time.Second))) + + return nil +} + +func (f *KVSFSMSnapshot) Release() { + f.logger.Info("release") +} diff --git a/server/raft_fsm_test.go b/server/raft_fsm_test.go new file mode 100644 index 0000000..865f623 --- /dev/null +++ b/server/raft_fsm_test.go @@ -0,0 +1,743 @@ +package server + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftFSM_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == 
nil { + t.Fatal("failed to create index") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if ret := fsm.set(id, fields); ret != nil { + t.Fatal("failed to index document") + } + + f, err := fsm.get(id) + if err != nil { + t.Fatalf("%v", err) + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if ret := fsm.delete(id); ret != nil { + t.Fatal("failed to delete document") + } + + f, err = fsm.get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_SetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_GetMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := 
mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to index document") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_DeleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + id := "node1" + metadata := &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + } + + if ret := fsm.setMetadata(id, metadata); ret != nil { + t.Fatal("failed to set metadata") + } + + m := fsm.getMetadata(id) + if metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", metadata.GrpcAddress, m.GrpcAddress) + } + + if ret := fsm.deleteMetadata(id); ret != nil { + t.Fatal("failed to delete metadata") + } + + m = fsm.getMetadata(id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := 
fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyJoin(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + data := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + event := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: dataAny, + } + + eventData, err := proto.Marshal(event) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + raftLog := &raft.Log{ + Data: eventData, + } + + ret := fsm.Apply(raftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(data.Id) + if data.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", data.Metadata.GrpcAddress, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyLeave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 
30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + // apply joni + setData := &protobuf.SetMetadataRequest{ + Id: "node1", + Metadata: &protobuf.Metadata{ + GrpcAddress: ":9000", + HttpAddress: ":8000", + }, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + joinEvent := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: setDataAny, + } + + joinEventData, err := proto.Marshal(joinEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + joinRaftLog := &raft.Log{ + Data: joinEventData, + } + + ret := fsm.Apply(joinRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m := fsm.getMetadata(setData.Id) + if setData.Metadata.GrpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", setData.Metadata.GrpcAddress, m.GrpcAddress) + } + + // apply leave + deleteData := &protobuf.DeleteMetadataRequest{ + Id: "node1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + leaveEvent := &protobuf.Event{ + Type: protobuf.Event_Leave, + Data: deleteDataAny, + } + + leaveEventData, err := proto.Marshal(leaveEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + leaveRaftLog := &raft.Log{ + Data: leaveEventData, + } + + ret = fsm.Apply(leaveRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + m = fsm.getMetadata(deleteData.Id) + if m != nil { + t.Fatalf("expected content to see %v, saw %v", nil, m.GrpcAddress) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplySet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir 
:= util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + 
t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftFSM_ApplyDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + fsm, err := NewRaftFSM(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if fsm == nil { + t.Fatal("failed to create index") + } + + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + fieldsBytes, err := json.Marshal(fields) + if err != nil { + t.Fatalf("%v", err) + } + + // apply set + setData := &protobuf.SetRequest{ + Id: "1", + Fields: fieldsBytes, + } + + setDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(setData, setDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + setEvent := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: setDataAny, + } + + setEventData, err := proto.Marshal(setEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + setRaftLog := &raft.Log{ + Data: setEventData, + } + + ret := fsm.Apply(setRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err := fsm.get(setData.Id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"] != f["title"] { + t.Fatalf("expected content to see %v, saw %v", fields["title"], f["title"]) + } + + // apply delete + deleteData := &protobuf.DeleteRequest{ + Id: "1", + } + + deleteDataAny := &any.Any{} + if err := marshaler.UnmarshalAny(deleteData, deleteDataAny); err != nil { + t.Fatal("failed to unmarshal data to any") + } + + deleteEvent := &protobuf.Event{ + Type: protobuf.Event_Delete, + Data: deleteDataAny, + } + + deleteEventData, err := proto.Marshal(deleteEvent) + if err != nil { + t.Fatal("failed to marshal event to bytes") + } + + deleteRaftLog := &raft.Log{ + Data: deleteEventData, + } + + ret = fsm.Apply(deleteRaftLog) + if ret.(*ApplyResponse).error != nil { + t.Fatal("failed to apply data") + } + + f, err = fsm.get(deleteData.Id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := fsm.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git 
a/server/raft_server.go b/server/raft_server.go new file mode 100644 index 0000000..ba27747 --- /dev/null +++ b/server/raft_server.go @@ -0,0 +1,857 @@ +package server + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "time" + + raftbadgerdb "github.com/bbva/raft-badger" + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/mapping" + "github.com/dgraph-io/badger/v2" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/marshaler" + "github.com/mosuka/blast/metric" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type RaftServer struct { + id string + raftAddress string + dataDirectory string + bootstrap bool + logger *zap.Logger + + fsm *RaftFSM + + transport *raft.NetworkTransport + raft *raft.Raft + + watchClusterStopCh chan struct{} + watchClusterDoneCh chan struct{} + + applyCh chan *protobuf.Event +} + +func NewRaftServer(id string, raftAddress string, dataDirectory string, indexMapping *mapping.IndexMappingImpl, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { + indexPath := filepath.Join(dataDirectory, "index") + fsm, err := NewRaftFSM(indexPath, indexMapping, logger) + if err != nil { + logger.Error("failed to create FSM", zap.String("index_path", indexPath), zap.Error(err)) + return nil, err + } + + return &RaftServer{ + id: id, + raftAddress: raftAddress, + dataDirectory: dataDirectory, + bootstrap: bootstrap, + fsm: fsm, + logger: logger, + + watchClusterStopCh: make(chan struct{}), + watchClusterDoneCh: make(chan struct{}), + + applyCh: make(chan *protobuf.Event, 1024), + }, nil +} + +func (s *RaftServer) Start() error { + config := raft.DefaultConfig() + config.LocalID = raft.ServerID(s.id) + config.SnapshotThreshold = 1024 + config.LogOutput = ioutil.Discard + + addr, err := net.ResolveTCPAddr("tcp", s.raftAddress) + if err != nil { + s.logger.Error("failed to 
resolve TCP address", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + s.transport, err = raft.NewTCPTransport(s.raftAddress, addr, 3, 10*time.Second, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create TCP transport", zap.String("raft_address", s.raftAddress), zap.Error(err)) + return err + } + + // create snapshot store + snapshotStore, err := raft.NewFileSnapshotStore(s.dataDirectory, 2, ioutil.Discard) + if err != nil { + s.logger.Error("failed to create file snapshot store", zap.String("path", s.dataDirectory), zap.Error(err)) + return err + } + + logStorePath := filepath.Join(s.dataDirectory, "raft", "log") + err = os.MkdirAll(logStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + logStoreBadgerOpts := badger.DefaultOptions(logStorePath) + logStoreBadgerOpts.ValueDir = logStorePath + logStoreBadgerOpts.SyncWrites = false + logStoreBadgerOpts.Logger = nil + logStoreOpts := raftbadgerdb.Options{ + Path: logStorePath, + BadgerOptions: &logStoreBadgerOpts, + } + raftLogStore, err := raftbadgerdb.New(logStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + stableStorePath := filepath.Join(s.dataDirectory, "raft", "stable") + err = os.MkdirAll(stableStorePath, 0755) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + stableStoreBadgerOpts := badger.DefaultOptions(stableStorePath) + stableStoreBadgerOpts.ValueDir = stableStorePath + stableStoreBadgerOpts.SyncWrites = false + stableStoreBadgerOpts.Logger = nil + stableStoreOpts := raftbadgerdb.Options{ + Path: stableStorePath, + BadgerOptions: &stableStoreBadgerOpts, + } + raftStableStore, err := raftbadgerdb.New(stableStoreOpts) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + // create raft + s.raft, err = raft.NewRaft(config, s.fsm, raftLogStore, raftStableStore, snapshotStore, s.transport) + if err != nil { + s.logger.Error("failed to create raft", zap.Any("config", config), 
zap.Error(err)) + return err + } + + if s.bootstrap { + configuration := raft.Configuration{ + Servers: []raft.Server{ + { + ID: config.LocalID, + Address: s.transport.LocalAddr(), + }, + }, + } + s.raft.BootstrapCluster(configuration) + } + + go func() { + s.startWatchCluster(500 * time.Millisecond) + }() + + s.logger.Info("Raft server started", zap.String("raft_address", s.raftAddress)) + return nil +} + +func (s *RaftServer) Stop() error { + s.applyCh <- nil + s.logger.Info("apply channel has closed") + + s.stopWatchCluster() + + if err := s.fsm.Close(); err != nil { + s.logger.Error("failed to close FSM", zap.Error(err)) + } + s.logger.Info("Raft FSM Closed") + + if future := s.raft.Shutdown(); future.Error() != nil { + s.logger.Info("failed to shutdown Raft", zap.Error(future.Error())) + } + s.logger.Info("Raft has shutdown", zap.String("raft_address", s.raftAddress)) + + return nil +} + +func (s *RaftServer) startWatchCluster(checkInterval time.Duration) { + s.logger.Info("start to update cluster info") + + defer func() { + close(s.watchClusterDoneCh) + }() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + timeout := 60 * time.Second + if err := s.WaitForDetectLeader(timeout); err != nil { + if err == errors.ErrTimeout { + s.logger.Error("leader detection timed out", zap.Duration("timeout", timeout), zap.Error(err)) + } else { + s.logger.Error("failed to detect leader", zap.Error(err)) + } + } + + for { + select { + case <-s.watchClusterStopCh: + s.logger.Info("received a request to stop updating a cluster") + return + case <-s.raft.LeaderCh(): + s.logger.Info("became a leader", zap.String("leaderAddr", string(s.raft.Leader()))) + case event := <-s.fsm.applyCh: + s.applyCh <- event + case <-ticker.C: + raftStats := s.raft.Stats() + + switch raftStats["state"] { + case "Follower": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Follower)) + case "Candidate": + 
metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Candidate)) + case "Leader": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Leader)) + case "Shutdown": + metric.RaftStateMetric.WithLabelValues(s.id).Set(float64(raft.Shutdown)) + } + + if term, err := strconv.ParseFloat(raftStats["term"], 64); err == nil { + metric.RaftTermMetric.WithLabelValues(s.id).Set(term) + } + + if lastLogIndex, err := strconv.ParseFloat(raftStats["last_log_index"], 64); err == nil { + metric.RaftLastLogIndexMetric.WithLabelValues(s.id).Set(lastLogIndex) + } + + if lastLogTerm, err := strconv.ParseFloat(raftStats["last_log_term"], 64); err == nil { + metric.RaftLastLogTermMetric.WithLabelValues(s.id).Set(lastLogTerm) + } + + if commitIndex, err := strconv.ParseFloat(raftStats["commit_index"], 64); err == nil { + metric.RaftCommitIndexMetric.WithLabelValues(s.id).Set(commitIndex) + } + + if appliedIndex, err := strconv.ParseFloat(raftStats["applied_index"], 64); err == nil { + metric.RaftAppliedIndexMetric.WithLabelValues(s.id).Set(appliedIndex) + } + + if fsmPending, err := strconv.ParseFloat(raftStats["fsm_pending"], 64); err == nil { + metric.RaftFsmPendingMetric.WithLabelValues(s.id).Set(fsmPending) + } + + if lastSnapshotIndex, err := strconv.ParseFloat(raftStats["last_snapshot_index"], 64); err == nil { + metric.RaftLastSnapshotIndexMetric.WithLabelValues(s.id).Set(lastSnapshotIndex) + } + + if lastSnapshotTerm, err := strconv.ParseFloat(raftStats["last_snapshot_term"], 64); err == nil { + metric.RaftLastSnapshotTermMetric.WithLabelValues(s.id).Set(lastSnapshotTerm) + } + + if latestConfigurationIndex, err := strconv.ParseFloat(raftStats["latest_configuration_index"], 64); err == nil { + metric.RaftLatestConfigurationIndexMetric.WithLabelValues(s.id).Set(latestConfigurationIndex) + } + + if numPeers, err := strconv.ParseFloat(raftStats["num_peers"], 64); err == nil { + metric.RaftNumPeersMetric.WithLabelValues(s.id).Set(numPeers) + } + + if lastContact, err 
:= strconv.ParseFloat(raftStats["last_contact"], 64); err == nil { + metric.RaftLastContactMetric.WithLabelValues(s.id).Set(lastContact) + } + + if nodes, err := s.Nodes(); err == nil { + metric.RaftNumNodesMetric.WithLabelValues(s.id).Set(float64(len(nodes))) + } + + indexStats := s.fsm.Stats() + + tmpIndex := indexStats["index"].(map[string]interface{}) + + metric.IndexCurOnDiskBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskBytes"].(uint64))) + + metric.IndexCurOnDiskFilesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurOnDiskFiles"].(uint64))) + + metric.IndexCurRootEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["CurRootEpoch"].(uint64))) + + metric.IndexLastMergedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastMergedEpoch"].(uint64))) + + metric.IndexLastPersistedEpochMetric.WithLabelValues(s.id).Set(float64(tmpIndex["LastPersistedEpoch"].(uint64))) + + metric.IndexMaxBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxBatchIntroTime"].(uint64))) + + metric.IndexMaxFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxFileMergeZapTime"].(uint64))) + + metric.IndexMaxMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["MaxMemMergeZapTime"].(uint64))) + + metric.IndexTotAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotAnalysisTime"].(uint64))) + + metric.IndexTotBatchIntroTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchIntroTime"].(uint64))) + + metric.IndexTotBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatches"].(uint64))) + + metric.IndexTotBatchesEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotBatchesEmpty"].(uint64))) + + metric.IndexTotDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotDeletes"].(uint64))) + + metric.IndexTotFileMergeIntroductionsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductions"].(uint64))) + + 
metric.IndexTotFileMergeIntroductionsDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsDone"].(uint64))) + + metric.IndexTotFileMergeIntroductionsSkippedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeIntroductionsSkipped"].(uint64))) + + metric.IndexTotFileMergeLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopBeg"].(uint64))) + + metric.IndexTotFileMergeLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopEnd"].(uint64))) + + metric.IndexTotFileMergeLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeLoopErr"].(uint64))) + + metric.IndexTotFileMergePlanMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlan"].(uint64))) + + metric.IndexTotFileMergePlanErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanErr"].(uint64))) + + metric.IndexTotFileMergePlanNoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanNone"].(uint64))) + + metric.IndexTotFileMergePlanOkMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanOk"].(uint64))) + + metric.IndexTotFileMergePlanTasksMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasks"].(uint64))) + + metric.IndexTotFileMergePlanTasksDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksDone"].(uint64))) + + metric.IndexTotFileMergePlanTasksErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksErr"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegments"].(uint64))) + + metric.IndexTotFileMergePlanTasksSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergePlanTasksSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegments"].(uint64))) + + 
metric.IndexTotFileMergeSegmentsEmptyMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeSegmentsEmpty"].(uint64))) + + metric.IndexTotFileMergeWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeWrittenBytes"].(uint64))) + + metric.IndexTotFileMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapBeg"].(uint64))) + + metric.IndexTotFileMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapEnd"].(uint64))) + + metric.IndexTotFileMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileMergeZapTime"].(uint64))) + + metric.IndexTotFileSegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotFileSegmentsAtRoot"].(uint64))) + + metric.IndexTotIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexTime"].(uint64))) + + metric.IndexTotIndexedPlainTextBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIndexedPlainTextBytes"].(uint64))) + + metric.IndexTotIntroduceLoopMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceLoop"].(uint64))) + + metric.IndexTotIntroduceMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeBeg"].(uint64))) + + metric.IndexTotIntroduceMergeEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceMergeEnd"].(uint64))) + + metric.IndexTotIntroducePersistBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistBeg"].(uint64))) + + metric.IndexTotIntroducePersistEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducePersistEnd"].(uint64))) + + metric.IndexTotIntroduceRevertBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertBeg"].(uint64))) + + metric.IndexTotIntroduceRevertEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceRevertEnd"].(uint64))) + + metric.IndexTotIntroduceSegmentBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentBeg"].(uint64))) + + 
metric.IndexTotIntroduceSegmentEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroduceSegmentEnd"].(uint64))) + + metric.IndexTotIntroducedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedItems"].(uint64))) + + metric.IndexTotIntroducedSegmentsBatchMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsBatch"].(uint64))) + + metric.IndexTotIntroducedSegmentsMergeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotIntroducedSegmentsMerge"].(uint64))) + + metric.IndexTotItemsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotItemsToPersist"].(uint64))) + + metric.IndexTotMemMergeBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeBeg"].(uint64))) + + metric.IndexTotMemMergeDoneMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeDone"].(uint64))) + + metric.IndexTotMemMergeErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeErr"].(uint64))) + + metric.IndexTotMemMergeSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeSegments"].(uint64))) + + metric.IndexTotMemMergeZapBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapBeg"].(uint64))) + + metric.IndexTotMemMergeZapEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapEnd"].(uint64))) + + metric.IndexTotMemMergeZapTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemMergeZapTime"].(uint64))) + + metric.IndexTotMemorySegmentsAtRootMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotMemorySegmentsAtRoot"].(uint64))) + + metric.IndexTotOnErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotOnErrors"].(uint64))) + + metric.IndexTotPersistLoopBegMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopBeg"].(uint64))) + + metric.IndexTotPersistLoopEndMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopEnd"].(uint64))) + + metric.IndexTotPersistLoopErrMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopErr"].(uint64))) + + 
metric.IndexTotPersistLoopProgressMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopProgress"].(uint64))) + + metric.IndexTotPersistLoopWaitMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWait"].(uint64))) + + metric.IndexTotPersistLoopWaitNotifiedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistLoopWaitNotified"].(uint64))) + + metric.IndexTotPersistedItemsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedItems"].(uint64))) + + metric.IndexTotPersistedSegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersistedSegments"].(uint64))) + + metric.IndexTotPersisterMergerNapBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterMergerNapBreak"].(uint64))) + + metric.IndexTotPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterNapPauseCompleted"].(uint64))) + + metric.IndexTotPersisterSlowMergerPauseMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerPause"].(uint64))) + + metric.IndexTotPersisterSlowMergerResumeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotPersisterSlowMergerResume"].(uint64))) + + metric.IndexTotTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersFinished"].(uint64))) + + metric.IndexTotTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotTermSearchersStarted"].(uint64))) + + metric.IndexTotUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["TotUpdates"].(uint64))) + + metric.IndexAnalysisTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["analysis_time"].(uint64))) + + metric.IndexBatchesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["batches"].(uint64))) + + metric.IndexDeletesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["deletes"].(uint64))) + + metric.IndexErrorsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["errors"].(uint64))) + + metric.IndexIndexTimeMetric.WithLabelValues(s.id).Set(float64(tmpIndex["index_time"].(uint64))) + 
+ metric.IndexNumBytesUsedDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_bytes_used_disk"].(uint64))) + + metric.IndexNumFilesOnDiskMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_files_on_disk"].(uint64))) + + metric.IndexNumItemsIntroducedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_introduced"].(uint64))) + + metric.IndexNumItemsPersistedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_items_persisted"].(uint64))) + + metric.IndexNumPersisterNapMergerBreakMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_merger_break"].(uint64))) + + metric.IndexNumPersisterNapPauseCompletedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_persister_nap_pause_completed"].(uint64))) + + metric.IndexNumPlainTextBytesIndexedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_plain_text_bytes_indexed"].(uint64))) + + metric.IndexNumRecsToPersistMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_recs_to_persist"].(uint64))) + + metric.IndexNumRootFilesegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_filesegments"].(uint64))) + + metric.IndexNumRootMemorysegmentsMetric.WithLabelValues(s.id).Set(float64(tmpIndex["num_root_memorysegments"].(uint64))) + + metric.IndexTermSearchersFinishedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_finished"].(uint64))) + + metric.IndexTermSearchersStartedMetric.WithLabelValues(s.id).Set(float64(tmpIndex["term_searchers_started"].(uint64))) + + metric.IndexTotalCompactionWrittenBytesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["total_compaction_written_bytes"].(uint64))) + + metric.IndexUpdatesMetric.WithLabelValues(s.id).Set(float64(tmpIndex["updates"].(uint64))) + + metric.SearchTimeMetric.WithLabelValues(s.id).Set(float64(indexStats["search_time"].(uint64))) + + metric.SearchesMetric.WithLabelValues(s.id).Set(float64(indexStats["searches"].(uint64))) + } + } +} + +func (s *RaftServer) stopWatchCluster() { + if s.watchClusterStopCh != nil 
{ + s.logger.Info("send a request to stop updating a cluster") + close(s.watchClusterStopCh) + } + + s.logger.Info("wait for the cluster update to stop") + <-s.watchClusterDoneCh + s.logger.Info("the cluster update has been stopped") +} + +func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + + for { + select { + case <-ticker.C: + leaderAddr := s.raft.Leader() + if leaderAddr != "" { + s.logger.Debug("detected a leader address", zap.String("raft_address", string(leaderAddr))) + return leaderAddr, nil + } + case <-timer.C: + err := errors.ErrTimeout + s.logger.Error("failed to detect leader address", zap.Error(err)) + return "", err + } + } +} + +func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { + leaderAddr, err := s.LeaderAddress(timeout) + if err != nil { + s.logger.Error("failed to get leader address", zap.Error(err)) + return "", err + } + + cf := s.raft.GetConfiguration() + if err = cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return "", err + } + + for _, server := range cf.Configuration().Servers { + if server.Address == leaderAddr { + s.logger.Info("detected a leader ID", zap.String("id", string(server.ID))) + return server.ID, nil + } + } + + err = errors.ErrNotFoundLeader + s.logger.Error("failed to detect leader ID", zap.Error(err)) + return "", err +} + +func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { + if _, err := s.LeaderAddress(timeout); err != nil { + s.logger.Error("failed to wait for detect leader", zap.Error(err)) + return err + } + + return nil +} + +func (s *RaftServer) State() raft.RaftState { + return s.raft.State() +} + +func (s *RaftServer) StateStr() string { + return s.State().String() +} + +func (s *RaftServer) Exist(id string) (bool, error) { + exist := false + + cf := 
s.raft.GetConfiguration() + if err := cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return false, err + } + + for _, server := range cf.Configuration().Servers { + if server.ID == raft.ServerID(id) { + s.logger.Debug("node already joined the cluster", zap.String("id", id)) + exist = true + break + } + } + + return exist, nil +} + +func (s *RaftServer) getMetadata(id string) (*protobuf.Metadata, error) { + metadata := s.fsm.getMetadata(id) + if metadata == nil { + return nil, errors.ErrNotFound + } + + return metadata, nil +} + +func (s *RaftServer) setMetadata(id string, metadata *protobuf.Metadata) error { + data := &protobuf.SetMetadataRequest{ + Id: id, + Metadata: metadata, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Join, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal the command into the bytes as message", zap.String("id", id), zap.Any("metadata", metadata), zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) deleteMetadata(id string) error { + data := &protobuf.DeleteMetadataRequest{ + Id: id, + } + + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(data, dataAny); err != nil { + s.logger.Error("failed to unmarshal request to the command data", zap.String("id", id), zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Leave, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil 
{ + s.logger.Error("failed to marshal the command into the bytes as the message", zap.String("id", id), zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Join(id string, node *protobuf.Node) error { + exist, err := s.Exist(id) + if err != nil { + return err + } + + if !exist { + if future := s.raft.AddVoter(raft.ServerID(id), raft.ServerAddress(node.RaftAddress), 0, 0); future.Error() != nil { + s.logger.Error("failed to add voter", zap.String("id", id), zap.String("raft_address", node.RaftAddress), zap.Error(future.Error())) + return future.Error() + } + s.logger.Info("node has successfully joined", zap.String("id", id), zap.String("raft_address", node.RaftAddress)) + } + + if err := s.setMetadata(id, node.Metadata); err != nil { + return err + } + + if exist { + return errors.ErrNodeAlreadyExists + } + + return nil +} + +func (s *RaftServer) Leave(id string) error { + exist, err := s.Exist(id) + if err != nil { + return err + } + + if exist { + if future := s.raft.RemoveServer(raft.ServerID(id), 0, 0); future.Error() != nil { + s.logger.Error("failed to remove server", zap.String("id", id), zap.Error(future.Error())) + return future.Error() + } + s.logger.Info("node has successfully left", zap.String("id", id)) + } + + if err = s.deleteMetadata(id); err != nil { + return err + } + + if !exist { + return errors.ErrNodeDoesNotExist + } + + return nil +} + +func (s *RaftServer) Node() (*protobuf.Node, error) { + nodes, err := s.Nodes() + if err != nil { + return nil, err + } + + node, ok := nodes[s.id] + if !ok { + return nil, errors.ErrNotFound + } + + node.State = s.StateStr() + + return node, nil +} + +func (s *RaftServer) Nodes() (map[string]*protobuf.Node, error) { + cf := s.raft.GetConfiguration() + if 
err := cf.Error(); err != nil { + s.logger.Error("failed to get Raft configuration", zap.Error(err)) + return nil, err + } + + nodes := make(map[string]*protobuf.Node, 0) + for _, server := range cf.Configuration().Servers { + metadata, _ := s.getMetadata(string(server.ID)) + + nodes[string(server.ID)] = &protobuf.Node{ + RaftAddress: string(server.Address), + Metadata: metadata, + } + } + + return nodes, nil +} + +func (s *RaftServer) Snapshot() error { + if future := s.raft.Snapshot(); future.Error() != nil { + s.logger.Error("failed to snapshot", zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Get(id string) (map[string]interface{}, error) { + return s.fsm.get(id) +} + +func (s *RaftServer) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + return s.fsm.search(searchRequest) +} + +func (s *RaftServer) Set(req *protobuf.SetRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal document map to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_Set, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Delete(req *protobuf.DeleteRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal id to any", zap.Error(err)) + return err + } + + c := &protobuf.Event{ + Type: protobuf.Event_Delete, + Data: dataAny, + } + + msg, err := proto.Marshal(c) + if err != nil { + s.logger.Error("failed to marshal event to 
bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkIndex(req *protobuf.BulkIndexRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal bulk index request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkIndex, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) BulkDelete(req *protobuf.BulkDeleteRequest) error { + dataAny := &any.Any{} + if err := marshaler.UnmarshalAny(req, dataAny); err != nil { + s.logger.Error("failed to unmarshal set request to any", zap.Error(err)) + return err + } + + event := &protobuf.Event{ + Type: protobuf.Event_BulkDelete, + Data: dataAny, + } + + msg, err := proto.Marshal(event) + if err != nil { + s.logger.Error("failed to marshal event to bytes", zap.Error(err)) + return err + } + + timeout := 60 * time.Second + if future := s.raft.Apply(msg, timeout); future.Error() != nil { + s.logger.Error("failed to apply message bytes", zap.Duration("timeout", timeout), zap.Error(future.Error())) + return future.Error() + } + + return nil +} + +func (s *RaftServer) Mapping() (*protobuf.MappingResponse, error) { + resp := &protobuf.MappingResponse{} + + m := s.fsm.Mapping() + + fieldsBytes, err := json.Marshal(m) + if err != nil { + s.logger.Error("failed to 
marshal mapping to bytes", zap.Error(err)) + return resp, err + } + + resp.Mapping = fieldsBytes + + return resp, nil +} diff --git a/server/raft_server_test.go b/server/raft_server_test.go new file mode 100644 index 0000000..11a1b65 --- /dev/null +++ b/server/raft_server_test.go @@ -0,0 +1,1536 @@ +package server + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/util" +) + +func Test_RaftServer_Close(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + time.Sleep(10 * time.Second) +} + +func Test_RaftServer_LeaderAddress(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + 
+ if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderAddress, err := server.LeaderAddress(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != string(leaderAddress) { + t.Fatalf("expected content to see %v, saw %v", raftAddress, string(leaderAddress)) + } +} + +func Test_RaftServer_LeaderID(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + leaderId, err := server.LeaderID(60 * time.Second) + if err != nil { + t.Fatalf("%v", err) + } + if id != string(leaderId) { + t.Fatalf("expected content to see %v, saw %v", id, string(leaderId)) + } +} + +func Test_RaftServer_WaitForDetectLeader(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + 
if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_State(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.State() + if raft.Leader != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader, state) + } +} + +func Test_RaftServer_StateStr(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + state := server.StateStr() + if raft.Leader.String() != state { + t.Fatalf("expected content to see %v, saw %v", raft.Leader.String(), state) + } +} + +func Test_RaftServer_Exist(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + exist, err := server.Exist(id) + if err != nil { + t.Fatalf("%v", err) + } + if !exist { + t.Fatalf("expected content to see %v, saw %v", true, exist) + } + + exist, err = server.Exist("non-existent-id") + if err != nil { + t.Fatalf("%v", err) + } + if exist { + t.Fatalf("expected content to see %v, saw %v", false, exist) + } +} + +func Test_RaftServer_setMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + 
t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_getMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, 
m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } +} + +func Test_RaftServer_deleteMetadata(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + metadata := &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + } + + // set + if err := server.setMetadata(id, metadata); err != nil { + t.Fatalf("%v", err) + } + + // get + m, err := server.getMetadata(id) + if err != nil { + t.Fatalf("%v", err) + } + if grpcAddress != m.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, m.GrpcAddress) + } + if httpAddress != m.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, m.HttpAddress) + } + + // delete + if err := server.deleteMetadata(id); err != nil { + t.Fatalf("%v", err) + } + + //get + m, err = server.getMetadata(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatalf("%v", err) + } + } + if err == nil { + t.Fatalf("expected content to see %v, saw %v", nil, err) + } +} + +func Test_RaftServer_Join(t 
*testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } +} + +func Test_RaftServer_Node(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + 
t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + n, err := server.Node() + if err != nil { + t.Fatalf("%v", err) + } + if raftAddress != n.RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, n.RaftAddress) + } + if grpcAddress != n.Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, n.Metadata.GrpcAddress) + } + if httpAddress != n.Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, n.Metadata.HttpAddress) + } +} + +func Test_RaftServer_Cluster(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: 
raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := 
server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + 
if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + 
if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } +} + +func Test_RaftServer_Leave(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + id := "node1" + raftAddress := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer(id, raftAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(60 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + node := &protobuf.Node{ + RaftAddress: raftAddress, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } + + if 
err := server.Join(id, node); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id2 := "node2" + raftAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", util.TmpPort()) + + dir2 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir2) + }() + + indexMapping2, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger2 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server2, err := NewRaftServer(id2, raftAddress2, dir2, indexMapping2, false, logger2) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server2.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server2.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + node2 := &protobuf.Node{ + RaftAddress: raftAddress2, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + if err := server.Join(id2, node2); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + // ---------- + + id3 := "node3" + raftAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + grpcAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", util.TmpPort()) + + dir3 := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir3) + }() + + indexMapping3, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger3 := log.NewLogger("WARN", "", 500, 3, 30, false) + + server3, err := NewRaftServer(id3, raftAddress3, dir3, indexMapping3, false, logger3) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server3.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server3.Stop(); err != nil { + 
t.Fatalf("%v", err) + } + }() + + node3 := &protobuf.Node{ + RaftAddress: raftAddress3, + Metadata: &protobuf.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + if err := server.Join(id3, node3); err != nil { + switch err { + case errors.ErrNodeAlreadyExists: + // ok + default: + t.Fatalf("%v", err) + } + } + + ns, err := server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns[id3].RaftAddress) + } + if grpcAddress3 != ns[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns2, err := server2.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns2)) + } + if raftAddress != 
ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns2[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns2[id3].RaftAddress) + } + if grpcAddress3 != ns2[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns2[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns2[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns2[id3].Metadata.HttpAddress) + } + + time.Sleep(3 * time.Second) + + ns3, err := server3.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 3 != len(ns3) { + t.Fatalf("expected content to see %v, saw %v", 3, len(ns3)) + } + if raftAddress != ns3[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns3[id].RaftAddress) + } + if grpcAddress != ns3[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns3[id].Metadata.GrpcAddress) + } + if httpAddress != ns3[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns3[id].Metadata.HttpAddress) + } + if raftAddress2 != ns3[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, 
ns3[id2].RaftAddress) + } + if grpcAddress2 != ns3[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns3[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns3[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns3[id2].Metadata.HttpAddress) + } + if raftAddress3 != ns3[id3].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress3, ns3[id3].RaftAddress) + } + if grpcAddress3 != ns3[id3].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress3, ns3[id3].Metadata.GrpcAddress) + } + if httpAddress3 != ns3[id3].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress3, ns3[id3].Metadata.HttpAddress) + } + + if err := server.Leave(id3); err != nil { + t.Fatalf("%v", err) + } + + ns, err = server.Nodes() + if err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns)) + } + if raftAddress != ns[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns[id].RaftAddress) + } + if grpcAddress != ns[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns[id].Metadata.GrpcAddress) + } + if httpAddress != ns[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns[id].Metadata.HttpAddress) + } + if raftAddress2 != ns[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns[id2].RaftAddress) + } + if grpcAddress2 != ns[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns[id2].Metadata.HttpAddress) + } + if _, ok := ns[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } + + time.Sleep(3 * time.Second) + + ns2, err = server2.Nodes() + if 
err != nil { + t.Fatalf("%v", err) + } + if 2 != len(ns2) { + t.Fatalf("expected content to see %v, saw %v", 2, len(ns2)) + } + if raftAddress != ns2[id].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress, ns2[id].RaftAddress) + } + if grpcAddress != ns2[id].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress, ns2[id].Metadata.GrpcAddress) + } + if httpAddress != ns2[id].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress, ns2[id].Metadata.HttpAddress) + } + if raftAddress2 != ns2[id2].RaftAddress { + t.Fatalf("expected content to see %v, saw %v", raftAddress2, ns2[id2].RaftAddress) + } + if grpcAddress2 != ns2[id2].Metadata.GrpcAddress { + t.Fatalf("expected content to see %v, saw %v", grpcAddress2, ns2[id2].Metadata.GrpcAddress) + } + if httpAddress2 != ns2[id2].Metadata.HttpAddress { + t.Fatalf("expected content to see %v, saw %v", httpAddress2, ns2[id2].Metadata.HttpAddress) + } + if _, ok := ns2[id3]; ok { + t.Fatalf("expected content to see %v, saw %v", false, ok) + } +} + +func Test_RaftServer_Set(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": 
"Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } +} + +func Test_RaftServer_Get(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. 
The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } +} + +func Test_RaftServer_Delete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a 
computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err := server.Get(docId1) + if err != nil { + t.Fatalf("%v", err) + } + if docFieldsMap1["title"] != f1["title"] { + t.Fatalf("expected content to see %v, saw %v", docFieldsMap1["title"], f1["title"]) + } + + deleteReq1 := &protobuf.DeleteRequest{ + Id: docId1, + } + + if err := server.Delete(deleteReq1); err != nil { + t.Fatalf("%v", err) + } + + f1, err = server.Get(docId1) + if err != nil { + switch err { + case errors.ErrNotFound: + //ok + default: + t.Fatalf("%v", err) + } + } + if f1 != nil { + t.Fatalf("expected content to see %v, saw %v", nil, f1) + } +} + +func Test_RaftServer_Snapshot(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + rafAddress := fmt.Sprintf(":%d", util.TmpPort()) + + dir := util.TmpDir() + defer func() { + _ = os.RemoveAll(dir) + }() + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + server, err := NewRaftServer("node1", rafAddress, dir, indexMapping, true, logger) + if err != nil { + t.Fatalf("%v", err) + } + + if err := server.Start(); err != nil { + t.Fatalf("%v", err) + } + defer func() { + if 
err := server.Stop(); err != nil { + t.Fatalf("%v", err) + } + }() + + if err := server.WaitForDetectLeader(10 * time.Second); err != nil { + t.Fatalf("%v", err) + } + + docId1 := "1" + docFieldsMap1 := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + docFields1, err := json.Marshal(docFieldsMap1) + if err != nil { + t.Fatalf("%v", err) + } + + setReq1 := &protobuf.SetRequest{ + Id: docId1, + Fields: docFields1, + } + + if err := server.Set(setReq1); err != nil { + t.Fatalf("%v", err) + } + + if err := server.Snapshot(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/sortutils/sort.go b/sortutils/sort.go deleted file mode 100644 index 9f41b7f..0000000 --- a/sortutils/sort.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sortutils - -import ( - "github.com/blevesearch/bleve/search" -) - -type MultiSearchHitSorter struct { - hits search.DocumentMatchCollection - sort search.SortOrder - cachedScoring []bool - cachedDesc []bool -} - -func NewMultiSearchHitSorter(sort search.SortOrder, hits search.DocumentMatchCollection) *MultiSearchHitSorter { - return &MultiSearchHitSorter{ - sort: sort, - hits: hits, - cachedScoring: sort.CacheIsScore(), - cachedDesc: sort.CacheDescending(), - } -} - -func (m *MultiSearchHitSorter) Len() int { - return len(m.hits) -} - -func (m *MultiSearchHitSorter) Swap(i, j int) { - m.hits[i], m.hits[j] = m.hits[j], m.hits[i] -} - -func (m *MultiSearchHitSorter) Less(i, j int) bool { - c := m.sort.Compare(m.cachedScoring, m.cachedDesc, m.hits[i], m.hits[j]) - - return c < 0 -} diff --git a/storage/index.go b/storage/index.go new file mode 100644 index 0000000..da50a0b --- /dev/null +++ b/storage/index.go @@ -0,0 +1,269 @@ +package storage + +import ( + "os" + "time" + + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/document" + "github.com/blevesearch/bleve/index/scorch" + "github.com/blevesearch/bleve/mapping" + _ "github.com/mosuka/blast/builtin" + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/protobuf" + "go.uber.org/zap" +) + +type Index struct { + indexMapping *mapping.IndexMappingImpl + logger *zap.Logger + + index bleve.Index +} + +func NewIndex(dir string, indexMapping *mapping.IndexMappingImpl, logger *zap.Logger) (*Index, error) { + var index bleve.Index + + if _, err := os.Stat(dir); os.IsNotExist(err) { + // create new index + index, err = bleve.NewUsing(dir, indexMapping, scorch.Name, scorch.Name, nil) + if err != nil { + logger.Error("failed to create index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } else { + // open existing index + index, err = bleve.OpenUsing(dir, map[string]interface{}{ + "create_if_missing": false, + "error_if_exists": false, + }) + if err != nil { + 
logger.Error("failed to open index", zap.String("dir", dir), zap.Error(err)) + return nil, err + } + } + + return &Index{ + index: index, + indexMapping: indexMapping, + logger: logger, + }, nil +} + +func (i *Index) Close() error { + if err := i.index.Close(); err != nil { + i.logger.Error("failed to close index", zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) Get(id string) (map[string]interface{}, error) { + d, err := i.index.Document(id) + if err != nil { + i.logger.Error("failed to get document", zap.String("id", id), zap.Error(err)) + return nil, err + } + if d == nil { + err := errors.ErrNotFound + i.logger.Debug("document does not found", zap.String("id", id), zap.Error(err)) + return nil, err + } + + fields := make(map[string]interface{}, 0) + for _, f := range d.Fields { + var v interface{} + switch field := f.(type) { + case *document.TextField: + v = string(field.Value()) + case *document.NumericField: + n, err := field.Number() + if err == nil { + v = n + } + case *document.DateTimeField: + d, err := field.DateTime() + if err == nil { + v = d.Format(time.RFC3339Nano) + } + } + existing, existed := fields[f.Name()] + if existed { + switch existing := existing.(type) { + case []interface{}: + fields[f.Name()] = append(existing, v) + case interface{}: + arr := make([]interface{}, 2) + arr[0] = existing + arr[1] = v + fields[f.Name()] = arr + } + } else { + fields[f.Name()] = v + } + } + + return fields, nil +} + +func (i *Index) Search(searchRequest *bleve.SearchRequest) (*bleve.SearchResult, error) { + searchResult, err := i.index.Search(searchRequest) + if err != nil { + i.logger.Error("failed to search documents", zap.Any("search_request", searchRequest), zap.Error(err)) + return nil, err + } + + return searchResult, nil +} + +func (i *Index) Index(id string, fields map[string]interface{}) error { + if err := i.index.Index(id, fields); err != nil { + i.logger.Error("failed to index document", zap.String("id", id), zap.Error(err)) 
+ return err + } + + return nil +} + +func (i *Index) Delete(id string) error { + if err := i.index.Delete(id); err != nil { + i.logger.Error("failed to delete document", zap.String("id", id), zap.Error(err)) + return err + } + + return nil +} + +func (i *Index) BulkIndex(docs []map[string]interface{}) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, doc := range docs { + id, ok := doc["id"].(string) + if !ok { + err := errors.ErrNil + i.logger.Error("missing id", zap.Error(err)) + continue + } + fields, ok := doc["fields"].(map[string]interface{}) + if !ok { + err := errors.ErrNil + i.logger.Error("missing fields", zap.Error(err)) + continue + } + + if err := batch.Index(id, fields); err != nil { + i.logger.Error("failed to index document in batch", zap.String("id", id), zap.Error(err)) + continue + } + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to index documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + if count <= 0 { + err := errors.ErrNoUpdate + i.logger.Error("no documents updated", zap.Any("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) BulkDelete(ids []string) (int, error) { + batch := i.index.NewBatch() + + count := 0 + + for _, id := range ids { + batch.Delete(id) + count++ + } + + err := i.index.Batch(batch) + if err != nil { + i.logger.Error("failed to delete documents", zap.Int("count", count), zap.Error(err)) + return count, err + } + + return count, nil +} + +func (i *Index) Mapping() *mapping.IndexMappingImpl { + return i.indexMapping +} + +func (i *Index) Stats() map[string]interface{} { + return i.index.StatsMap() +} + +func (i *Index) SnapshotItems() <-chan *protobuf.Document { + ch := make(chan *protobuf.Document, 1024) + + go func() { + idx, _, err := i.index.Advanced() + if err != nil { + i.logger.Error("failed to get index", zap.Error(err)) + return + } + + ir, err := idx.Reader() + if err != nil 
{ + i.logger.Error("failed to get index reader", zap.Error(err)) + return + } + + docCount := 0 + + dr, err := ir.DocIDReaderAll() + if err != nil { + i.logger.Error("failed to get doc ID reader", zap.Error(err)) + return + } + for { + //if dr == nil { + // i.logger.Error(err.Error()) + // break + //} + id, err := dr.Next() + if id == nil { + i.logger.Debug("finished to read all document IDs") + break + } else if err != nil { + i.logger.Warn("failed to get doc ID", zap.Error(err)) + continue + } + + // get original document + fieldsBytes, err := i.index.GetInternal(id) + if err != nil { + i.logger.Warn("failed to get doc fields bytes", zap.String("id", string(id)), zap.Error(err)) + continue + } + + doc := &protobuf.Document{ + Id: string(id), + Fields: fieldsBytes, + } + + ch <- doc + + docCount = docCount + 1 + } + + i.logger.Debug("finished to write all documents to channel") + ch <- nil + + i.logger.Info("finished to snapshot", zap.Int("count", docCount)) + + return + }() + + return ch +} diff --git a/storage/index_test.go b/storage/index_test.go new file mode 100644 index 0000000..72bd723 --- /dev/null +++ b/storage/index_test.go @@ -0,0 +1,341 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/mosuka/blast/errors" + "github.com/mosuka/blast/log" + "github.com/mosuka/blast/mapping" + "github.com/mosuka/blast/util" +) + +func TestClose(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create 
index") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestGet(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + f, err := index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + if fields["title"].(string) != f["title"].(string) { + t.Fatalf("expected content to see %v, saw %v", fields["title"].(string), f["title"].(string)) + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + id := "1" + fields := map[string]interface{}{ + "title": "Search engine (computing)", + "text": "A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web.", + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + + if err := index.Index(id, fields); err != nil { + t.Fatal("failed to index document") + } + + fields, err = index.Get(id) + if err != nil { + t.Fatal("failed to get document") + } + + if err := index.Delete(id); err != nil { + t.Fatal("failed to delete document") + } + + fields, err = index.Get(id) + if err != nil { + switch err { + case errors.ErrNotFound: + // ok + default: + t.Fatal("failed to get document") + } + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkIndex(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. 
The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. %d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} + +func TestBulkDelete(t *testing.T) { + curDir, err := os.Getwd() + if err != nil { + t.Fatalf("%v", err) + } + + tmpDir := util.TmpDir() + defer func() { + _ = os.RemoveAll(tmpDir) + }() + + dir := filepath.Join(tmpDir, "index") + + indexMapping, err := mapping.NewIndexMappingFromFile(filepath.Join(curDir, "../examples/example_mapping.json")) + if err != nil { + t.Fatalf("%v", err) + } + + logger := log.NewLogger("WARN", "", 500, 3, 30, false) + + index, err := NewIndex(dir, indexMapping, logger) + if err != nil { + t.Fatalf("%v", err) + } + if index == nil { + t.Fatal("failed to create index") + } + + docs := make([]map[string]interface{}, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + fields := map[string]interface{}{ + "title": fmt.Sprintf("Search engine (computing) %d", i), + "text": fmt.Sprintf("A search engine is an information retrieval system designed to help find information stored on a computer system. The search results are usually presented in a list and are commonly called hits. Search engines help to minimize the time required to find information and the amount of information which must be consulted, akin to other techniques for managing information overload. The most public, visible form of a search engine is a Web search engine which searches for information on the World Wide Web. 
%d", i), + "timestamp": time.Now().UTC().Format(time.RFC3339), + "_type": "example", + } + doc := map[string]interface{}{ + "id": id, + "fields": fields, + } + + docs = append(docs, doc) + } + + count, err := index.BulkIndex(docs) + if err != nil { + t.Fatal("failed to index documents") + } + if count <= 0 { + t.Fatal("failed to index documents") + } + + ids := make([]string, 0) + for i := 1; i <= 100; i++ { + id := strconv.Itoa(i) + + ids = append(ids, id) + } + + count, err = index.BulkDelete(ids) + if err != nil { + t.Fatal("failed to delete documents") + } + if count <= 0 { + t.Fatal("failed to delete documents") + } + + if err := index.Close(); err != nil { + t.Fatalf("%v", err) + } +} diff --git a/strutils/strutils.go b/strutils/strutils.go deleted file mode 100644 index 4ea086d..0000000 --- a/strutils/strutils.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package strutils - -import ( - "math/rand" - "time" -) - -var randSrc = rand.NewSource(time.Now().UnixNano()) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - letterIdxBits = 6 - letterIdxMask = 1<= 0; { - if remain == 0 { - cache, remain = randSrc.Int63(), letterIdxMax - } - idx := int(cache & letterIdxMask) - if idx < len(letters) { - b[i] = letters[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - - return string(b) -} diff --git a/testutils/testutils.go b/testutils/testutils.go deleted file mode 100644 index 9e0ec2c..0000000 --- a/testutils/testutils.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutils - -import ( - "io/ioutil" - "net" -) - -func TmpDir() string { - tmp, _ := ioutil.TempDir("", "") - return tmp -} - -func TmpPort() int { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return -1 - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return -1 - } - - defer func() { - _ = l.Close() - }() - - return l.Addr().(*net.TCPAddr).Port -} diff --git a/util/temp.go b/util/temp.go new file mode 100644 index 0000000..8f3208f --- /dev/null +++ b/util/temp.go @@ -0,0 +1,29 @@ +package util + +import ( + "io/ioutil" + "net" +) + +func TmpDir() string { + tmp, _ := ioutil.TempDir("", "") + return tmp +} + +func TmpPort() int { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return -1 + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return -1 + } + + defer func() { + _ = l.Close() + }() + + return l.Addr().(*net.TCPAddr).Port +} diff --git a/version/version.go b/version/version.go index 328268a..1895fc9 100644 --- a/version/version.go +++ b/version/version.go @@ -1,17 +1,3 @@ -// Copyright (c) 2019 Minoru Osuka -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package version var ( From fe53e18e703bda84134a33de02c91d2d2e19d046 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Tue, 7 Apr 2020 17:01:14 +0900 Subject: [PATCH 2/2] Update Changes.md --- CHANGES.md | 105 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 75 insertions(+), 30 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 37860d7..b660b65 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,42 +5,87 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). -## [v0.3.1] 2020-04-01 +## [Unreleased] -- Update protobuf #33 @mosuka +- Add coverage to Makefile #114 +- Docker compose #119 +- Bump Bleve version to v0.8.1 #117 -## [v0.3.0] 2020-03-31 -- Add health check endpoints #32 @mosuka -- Add some metrics #31 @mosuka -- Allow CLI options to be read from the configuration file #29 @mosuka -- Fix gateway bug #26 @mosuka -- Support TLS #25 @mosuka -- Add keepalive options #24 @mosuka -- Improve cluster watching #22 @mosuka -- Refactoring #21 @mosuka -- Update Makefile #20 @mosuka +## [v0.8.1] -## [v0.2.0] 2020-03-19 +- Update go version and dependencies #109 -- Add join endpoint #19 @mosuka -- Add leave endpoint #18 @mosuka -- Add snapshot endpoint #17 @mosuka -- Disable raft-badgerdb logging #16 @mosuka -- Migrate to grpc-gateway #15 @mosuka -- Add metrics command #14 @mosuka -- Use raft-badger #13 @mosuka -- Refactoring #12 @mosuka -- Refactoring #11 @mosuka -- Refactoring #10 @mosuka -- Upgrade Badger #9 @mosuka -- Upgrade Raft #8 @mosuka -- Refactoring #7 @mosuka -## [v0.1.1] 2019-11-05 +## [v0.8.0] -- Fix bugs in defer #5 @mosuka +- Add swagger specification experimentally #107 +- New CLI #82 +- Split protobuf into components #84 +- Change subcommands #85 +- Update protobuf #86 +- Change protobuf #87 +- Change the cluster watching method #90 +- Change cluster watch command for manager 
#92 +- Change node state to enum from string #93 +- Change node info structure #94 +- Change protobuf for indexer and dispatcher #95 +- Change server arguments #96 +- Change index protobuf #97 +- Use protobuf document #98 +- Change node state to Node_SHUTDOWN in an error #99 +- Fix a bug for waiting to receive indexer cluster updates from the stream #100 +- Migrate to grpc-gateway #105 -## [v0.1.0] 2019-03-30 -- First release @mosuka +## [v0.7.1] - 2019-07-18 + +- Add raft-badger #69 +- Add raft-storage-type flag #73 +- Add gRPC access logger #74 +- Improve indexing performance #71 +- Remove original document #72 +- Rename config package to builtins #75 + + +## [v0.7.0] - 2019-07-03 + +- Add GEO search example #65 +- Migrate grpc-middleware #68 + + +## [v0.6.1] - 2019-06-21 + +- Fix HTTP response into JSON format #64 +- Update Dockerfile #62 + + +## [v0.6.0] - 2019-06-19 + +- Add federated search #30 +- Add cluster manager (#48) +- Add KVS HTTP handlers #46 +- Update http logger #51 +- Update logutils (#50) +- Remove KVS (#49) + + +## [v0.5.0] - 2019-03-22 + +- Support bulk update #41 +- Support Badger #38 +- Add index stats #37 +- Add Wikipedia example #35 +- Support cznicb and leveldb #34 +- Add logging #33 +- Add CHANGES.md #29 +- Add error handling for server startup #28 +- Fixed some badger bugs #40 +- Restructure store package #36 +- Update examples #32 +- Update Makefile #31 + + +## [v0.4.0] - 2019-03-14 + +- Code refactoring.